Dataset schema (columns and value ranges as shown in the viewer):

    column                    type     values
    code                      string   lengths 86–54.5k
    code_codestyle            int64    0–371
    style_context             string   lengths 87–49.2k
    style_context_codestyle   int64    0–349
    label                     int64    0–1
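For readers who want to inspect rows with this schema programmatically, here is a minimal sketch using the `datasets` library. The dataset identifier below is a hypothetical placeholder (the real ID is not given in this preview), and the label semantics are only inferred from the rows that follow (label is 1 exactly when the two codestyle ids match), not confirmed by any documentation.

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # NOTE: "user/code-style-pairs" is a placeholder, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(len(row["code"]), row["code_codestyle"])                    # snippet and its style-class id
    print(len(row["style_context"]), row["style_context_codestyle"])  # reference snippet and its style-class id
    # label appears to be 1 when the two codestyle ids match, 0 otherwise (inferred from the preview rows)
    print(row["label"])

The preview rows follow; the obfuscated identifiers inside each cell are part of the dataset's content and are reproduced as-is.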
Row 1

code (code_codestyle: 13):

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

lowerCAmelCase : str = {
    """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
    """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
    """processing_whisper""": ["""WhisperProcessor"""],
    """tokenization_whisper""": ["""WhisperTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Any = ["""WhisperTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : int = [
        """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """WhisperForConditionalGeneration""",
        """WhisperModel""",
        """WhisperPreTrainedModel""",
        """WhisperForAudioClassification""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : int = [
        """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFWhisperForConditionalGeneration""",
        """TFWhisperModel""",
        """TFWhisperPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : str = [
        """FlaxWhisperForConditionalGeneration""",
        """FlaxWhisperModel""",
        """FlaxWhisperPreTrainedModel""",
        """FlaxWhisperForAudioClassification""",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)

style_context (style_context_codestyle: 12):

import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

UpperCAmelCase_ = get_tests_dir('fixtures')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json')


class lowerCamelCase__( unittest.TestCase):
    def lowerCAmelCase__ ( self: List[str] ):
        __lowerCamelCase = 0

    def lowerCAmelCase__ ( self: List[str] ):
        __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Union[str, Any] ):
        __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: int ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            __lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
            # save in new folder
            model_config.save_pretrained(UpperCamelCase_ )
            config.save_pretrained(UpperCamelCase_ )
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
            # make sure private variable is not incorrectly saved
            __lowerCamelCase = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Tuple ):
        __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: int ):
        with self.assertRaisesRegex(
            UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )

    def lowerCAmelCase__ ( self: Tuple ):
        with self.assertRaisesRegex(
            UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )

    def lowerCAmelCase__ ( self: Optional[Any] ):
        with self.assertRaisesRegex(
            UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def lowerCAmelCase__ ( self: Tuple ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(UpperCamelCase_ ):
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCamelCase_ ):
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )

        __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(UpperCamelCase_ )
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

    def lowerCAmelCase__ ( self: Any ):
        try:
            AutoConfig.register("""custom""" , UpperCamelCase_ )
            AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCamelCase_ ):
                AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            __lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(UpperCamelCase_ )
                __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
                self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def lowerCAmelCase__ ( self: Dict ):
        class lowerCamelCase__( __lowerCamelCase):
            UpperCAmelCase__ : str = True

        try:
            AutoConfig.register("""custom""" , UpperCamelCase_ )
            AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
            # If remote code is not set, the default is to use local
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote code is disabled, we load the local one.
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote is enabled, we load from the Hub
            __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

label: 0
Row 2

code (code_codestyle: 19):

def UpperCAmelCase_( a__ ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : List[str] = [0] * len(a__ )
    for i in range(1 , len(a__ ) ):
        # use last results for better performance - dynamic programming
        SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            SCREAMING_SNAKE_CASE : Optional[int] = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        SCREAMING_SNAKE_CASE : int = j
    return prefix_result


def UpperCAmelCase_( a__ ):
    """simple docstring"""
    return max(prefix_function(a__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

style_context (style_context_codestyle: 19):

import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class a_ :
    """simple docstring"""

    def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Dict:
        SCREAMING_SNAKE_CASE : Any = parent
        SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
        SCREAMING_SNAKE_CASE : Optional[Any] = image_size
        SCREAMING_SNAKE_CASE : str = num_channels
        SCREAMING_SNAKE_CASE : Any = num_stages
        SCREAMING_SNAKE_CASE : List[str] = hidden_sizes
        SCREAMING_SNAKE_CASE : Optional[Any] = depths
        SCREAMING_SNAKE_CASE : Any = is_training
        SCREAMING_SNAKE_CASE : Tuple = use_labels
        SCREAMING_SNAKE_CASE : Any = intermediate_size
        SCREAMING_SNAKE_CASE : Dict = hidden_act
        SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
        SCREAMING_SNAKE_CASE : str = initializer_range
        SCREAMING_SNAKE_CASE : int = out_features
        SCREAMING_SNAKE_CASE : List[str] = num_labels
        SCREAMING_SNAKE_CASE : int = scope
        SCREAMING_SNAKE_CASE : Optional[Any] = num_stages

    def __lowerCAmelCase ( self ) ->Dict:
        SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE : str = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def __lowerCAmelCase ( self ) ->List[Any]:
        return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def __lowerCAmelCase ( self ) ->Any:
        return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
        SCREAMING_SNAKE_CASE : List[Any] = UperNetForSemanticSegmentation(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase )
        self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def __lowerCAmelCase ( self ) ->Tuple:
        SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : Tuple = config_and_inputs
        SCREAMING_SNAKE_CASE : Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
    """simple docstring"""

    __SCREAMING_SNAKE_CASE : Tuple = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    __SCREAMING_SNAKE_CASE : List[str] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    __SCREAMING_SNAKE_CASE : Tuple = False
    __SCREAMING_SNAKE_CASE : Union[str, Any] = False
    __SCREAMING_SNAKE_CASE : Any = False
    __SCREAMING_SNAKE_CASE : Tuple = False
    __SCREAMING_SNAKE_CASE : Dict = False
    __SCREAMING_SNAKE_CASE : Any = False

    def __lowerCAmelCase ( self ) ->Tuple:
        SCREAMING_SNAKE_CASE : Optional[Any] = UperNetModelTester(self )
        SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )

    def __lowerCAmelCase ( self ) ->str:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __lowerCAmelCase ( self ) ->str:
        return

    def __lowerCAmelCase ( self ) ->Tuple:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
            SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _lowerCamelCase )

    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def __lowerCAmelCase ( self ) ->int:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def __lowerCAmelCase ( self ) ->int:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def __lowerCAmelCase ( self ) ->str:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def __lowerCAmelCase ( self ) ->str:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __lowerCAmelCase ( self ) ->Tuple:
        pass

    def __lowerCAmelCase ( self ) ->int:
        def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_lowerCamelCase )
            model.to(_lowerCamelCase )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
            SCREAMING_SNAKE_CASE : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Optional[int] = True
            check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE : Union[str, Any] = True
            check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )

    def __lowerCAmelCase ( self ) ->Any:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : str = _config_zero_init(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : int = model_class(config=_lowerCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def __lowerCAmelCase ( self ) ->List[Any]:
        pass

    @slow
    def __lowerCAmelCase ( self ) ->List[Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Any = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )


def UpperCAmelCase_( ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[int] = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    SCREAMING_SNAKE_CASE : Any = Image.open(a__ ).convert('''RGB''' )
    return image


@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
    """simple docstring"""

    def __lowerCAmelCase ( self ) ->Dict:
        SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        SCREAMING_SNAKE_CASE : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = prepare_img()
        SCREAMING_SNAKE_CASE : Tuple = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Optional[Any] = model(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )

    def __lowerCAmelCase ( self ) ->int:
        SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        SCREAMING_SNAKE_CASE : str = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = prepare_img()
        SCREAMING_SNAKE_CASE : Tuple = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : str = model(**_lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , _lowerCamelCase )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )

label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ : List[Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[Any] = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ : str = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Any = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
1
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = LayoutLMTokenizer __UpperCamelCase = LayoutLMTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_ : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowercase_ : List[str]): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = '''UNwant\u00E9d,running''' SCREAMING_SNAKE_CASE_ : Any = '''unwanted, running''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class(self.vocab_file) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(lowercase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [7, 4, 5, 10, 8, 9]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass
318
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.inf def set_batch_size(__a ) -> None: nonlocal batch_size if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__a , __a ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__a , __a ) return None if batch_size is np.inf else batch_size class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory) return dataset class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = dataset SCREAMING_SNAKE_CASE_ : Dict = path_or_buf SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features) SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs) else: SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_) for offset in logging.tqdm( range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE_ : List[Any] = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_) written += batch.nbytes writer.close() return written
318
1
Row 5

code (code_codestyle: 58):

'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging

lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}


class a_ ( snake_case_ ):
    '''simple docstring'''

    UpperCamelCase = '''gpt_neox_japanese'''

    def __init__( self , A=3_2000 , A=2560 , A=32 , A=32 , A=4 , A="gelu" , A=1.00 , A=1_0000 , A=2048 , A=0.02 , A=1e-5 , A=True , A=3_1996 , A=3_1999 , A=0.1 , A=0.0 , **A , ) -> List[str]:
        super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
        _SCREAMING_SNAKE_CASE = vocab_size
        _SCREAMING_SNAKE_CASE = max_position_embeddings
        _SCREAMING_SNAKE_CASE = hidden_size
        _SCREAMING_SNAKE_CASE = num_hidden_layers
        _SCREAMING_SNAKE_CASE = num_attention_heads
        _SCREAMING_SNAKE_CASE = intermediate_multiple_size
        _SCREAMING_SNAKE_CASE = hidden_act
        _SCREAMING_SNAKE_CASE = rotary_pct
        _SCREAMING_SNAKE_CASE = rotary_emb_base
        _SCREAMING_SNAKE_CASE = initializer_range
        _SCREAMING_SNAKE_CASE = layer_norm_eps
        _SCREAMING_SNAKE_CASE = use_cache
        _SCREAMING_SNAKE_CASE = attention_dropout
        _SCREAMING_SNAKE_CASE = hidden_dropout

style_context (style_context_codestyle: 203):

"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)

if is_botoa_available():
    import botoa  # noqa: F401


def __lowerCAmelCase ( lowercase : List[str] ) -> str:
    """simple docstring"""
    snake_case : Optional[int] = botoa.client("iam" )
    snake_case : Any = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role( RoleName=lowercase , AssumeRolePolicyDocument=json.dumps(lowercase , indent=2 ) )
        snake_case : Union[str, Any] = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy( RoleName=lowercase , PolicyName=F'{role_name}_policy_permission' , PolicyDocument=json.dumps(lowercase , indent=2 ) , )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F'role {role_name} already exists. Using existing one' )


def __lowerCAmelCase ( lowercase : Dict ) -> Optional[int]:
    """simple docstring"""
    snake_case : Any = botoa.client("iam" )
    return iam_client.get_role(RoleName=lowercase )["Role"]["Arn"]


def __lowerCAmelCase ( ) -> Union[str, Any]:
    """simple docstring"""
    snake_case : Optional[int] = _ask_options( "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowercase , )
    snake_case : int = None
    if credentials_configuration == 0:
        snake_case : Any = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        snake_case : List[str] = aws_profile
    else:
        print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        snake_case : Any = _ask_field("AWS Access Key ID: " )
        snake_case : List[str] = aws_access_key_id
        snake_case : Optional[int] = _ask_field("AWS Secret Access Key: " )
        snake_case : Union[str, Any] = aws_secret_access_key
    snake_case : Optional[Any] = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    snake_case : List[str] = aws_region
    snake_case : List[str] = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowercase , )
    if role_management == 0:
        snake_case : Tuple = _ask_field("Enter your IAM role name: " )
    else:
        snake_case : Union[str, Any] = "accelerate_sagemaker_execution_role"
        print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
        _create_iam_role_for_sagemaker(lowercase )
    snake_case : Union[str, Any] = _ask_field( "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
    snake_case : Any = None
    if is_custom_docker_image:
        snake_case : Union[str, Any] = _ask_field("Enter your Docker image: " , lambda lowercase : str(lowercase ).lower() )
    snake_case : List[Any] = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
    snake_case : List[str] = None
    if is_sagemaker_inputs_enabled:
        snake_case : Dict = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowercase : str(lowercase ).lower() , )
    snake_case : Tuple = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
    snake_case : int = None
    if is_sagemaker_metrics_enabled:
        snake_case : int = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowercase : str(lowercase ).lower() , )
    snake_case : str = _ask_options( "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    snake_case : Tuple = {}
    snake_case : Any = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
    if use_dynamo:
        snake_case : Any = "dynamo_"
        snake_case : Optional[int] = _ask_options( "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        snake_case : Optional[int] = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
        if use_custom_options:
            snake_case : Dict = _ask_options( "Which mode do you want to use?" , lowercase , lambda lowercase : TORCH_DYNAMO_MODES[int(lowercase )] , default="default" , )
            snake_case : Union[str, Any] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
            snake_case : Dict = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
    snake_case : List[str] = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        snake_case : str = _ask_options( lowercase , lowercase , lambda lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        snake_case : Union[str, Any] = _ask_field(lowercase , lambda lowercase : str(lowercase ).lower() , default="ml.p3.2xlarge" )
    snake_case : Any = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        snake_case : Dict = _ask_field( "How many machines do you want use? [1]: " , lowercase , default=1 , )
    snake_case : Union[str, Any] = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
    return SageMakerConfig( image_uri=lowercase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase , use_cpu=lowercase , dynamo_config=lowercase , eca_instance_type=lowercase , profile=lowercase , region=lowercase , iam_role_name=lowercase , mixed_precision=lowercase , num_machines=lowercase , sagemaker_inputs_file=lowercase , sagemaker_metrics_file=lowercase , )

label: 0
Row 6

code (code_codestyle: 262):

import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available

if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class __A ( a ):
    def _snake_case ( self ):
        lowerCamelCase =tempfile.mkdtemp()
        lowerCamelCase =8
        # DPR tok
        lowerCamelCase =[
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        lowerCamelCase =os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCamelCase =os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        # BART tok
        lowerCamelCase =[
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        lowerCamelCase =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
        lowerCamelCase =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        lowerCamelCase ={"""unk_token""": """<unk>"""}
        lowerCamelCase =os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
        lowerCamelCase =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(UpperCAmelCase_ ) )

    def _snake_case ( self ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )

    def _snake_case ( self ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )

    def _snake_case ( self ):
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def _snake_case ( self ):
        lowerCamelCase =os.path.join(self.tmpdirname , """rag_tokenizer""" )
        lowerCamelCase =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        lowerCamelCase =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(UpperCAmelCase_ )
        rag_tokenizer.save_pretrained(UpperCAmelCase_ )
        lowerCamelCase =RagTokenizer.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase_ )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase_ )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def _snake_case ( self ):
        lowerCamelCase =RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        lowerCamelCase =[
            """who got the first nobel prize in physics""",
            """when is the next deadpool movie being released""",
            """which mode is used for short wave broadcast service""",
            """who is the owner of reading football club""",
            """when is the next scandal episode coming out""",
            """when is the last time the philadelphia won the superbowl""",
            """what is the most current adobe flash player version""",
            """how many episodes are there in dragon ball z""",
            """what is the first step in the evolution of the eye""",
            """where is gall bladder situated in human body""",
            """what is the main mineral in lithium batteries""",
            """who is the president of usa right now""",
            """where do the greasers live in the outsiders""",
            """panda is a national animal of which country""",
            """what is the name of manchester united stadium""",
        ]
        lowerCamelCase =tokenizer(UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )

    @slow
    def _snake_case ( self ):
        lowerCamelCase =RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        lowerCamelCase =[
            """who got the first nobel prize in physics""",
            """when is the next deadpool movie being released""",
            """which mode is used for short wave broadcast service""",
            """who is the owner of reading football club""",
            """when is the next scandal episode coming out""",
            """when is the last time the philadelphia won the superbowl""",
            """what is the most current adobe flash player version""",
            """how many episodes are there in dragon ball z""",
            """what is the first step in the evolution of the eye""",
            """where is gall bladder situated in human body""",
            """what is the main mineral in lithium batteries""",
            """who is the president of usa right now""",
            """where do the greasers live in the outsiders""",
            """panda is a national animal of which country""",
            """what is the name of manchester united stadium""",
        ]
        lowerCamelCase =tokenizer(UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )

style_context (style_context_codestyle: 262):

from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

UpperCAmelCase__ : Dict =logging.get_logger(__name__)  # pylint: disable=invalid-name


class __A ( a ):
    def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ):
        super().__init__()
        self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )

    @torch.no_grad()
    def __call__( self , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 100 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , ):
        if audio_length_in_s is None:
            lowerCamelCase =self.unet.config.sample_size / self.unet.config.sample_rate
        lowerCamelCase =audio_length_in_s * self.unet.config.sample_rate
        lowerCamelCase =2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        lowerCamelCase =int(UpperCAmelCase_ )
        if sample_size % down_scale_factor != 0:
            lowerCamelCase =( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor
            logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" """ process.""" )
            lowerCamelCase =int(UpperCAmelCase_ )
        lowerCamelCase =next(iter(self.unet.parameters() ) ).dtype
        lowerCamelCase =(batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size:
            raise ValueError( f"""You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        lowerCamelCase =randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
        # set step values
        self.scheduler.set_timesteps(UpperCAmelCase_ , device=audio.device )
        lowerCamelCase =self.scheduler.timesteps.to(UpperCAmelCase_ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCamelCase =self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample
            # 2. compute previous image: x_t -> t_t-1
            lowerCamelCase =self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        lowerCamelCase =audio.clamp(-1 , 1 ).float().cpu().numpy()
        lowerCamelCase =audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=UpperCAmelCase_ )

label: 1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = """codegen""" lowerCamelCase_ : Optional[int] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , UpperCamelCase__=5_0400 , UpperCamelCase__=2048 , UpperCamelCase__=2048 , UpperCamelCase__=4096 , UpperCamelCase__=28 , UpperCamelCase__=16 , UpperCamelCase__=64 , UpperCamelCase__=None , UpperCamelCase__="gelu_new" , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=5_0256 , UpperCamelCase__=5_0256 , UpperCamelCase__=False , **UpperCamelCase__ , ) -> List[str]: lowerCamelCase : Union[str, Any] = vocab_size lowerCamelCase : Optional[Any] = n_ctx lowerCamelCase : Optional[int] = n_positions lowerCamelCase : int = n_embd lowerCamelCase : Optional[Any] = n_layer lowerCamelCase : List[Any] = n_head lowerCamelCase : Optional[int] = n_inner lowerCamelCase : Optional[int] = rotary_dim lowerCamelCase : int = activation_function lowerCamelCase : List[str] = resid_pdrop lowerCamelCase : Optional[int] = embd_pdrop lowerCamelCase : Tuple = attn_pdrop lowerCamelCase : int = layer_norm_epsilon lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = bos_token_id lowerCamelCase : Tuple = eos_token_id super().__init__( bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ = "default" , UpperCamelCase__ = None , UpperCamelCase__ = False , ) -> List[Any]: 
super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ ) if not getattr(self._config , "pad_token_id" , UpperCamelCase__ ): # TODO: how to do that better? lowerCamelCase : Union[str, Any] = 0 @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: lowerCamelCase : List[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" ) lowerCamelCase : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"} else: lowerCamelCase : Dict = {0: "batch", 1: "sequence"} return common_inputs @property def _lowercase ( self ) -> int: return self._config.n_layer @property def _lowercase ( self ) -> int: return self._config.n_head def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]: lowerCamelCase : int = super(UpperCamelCase__ , self ).generate_dummy_inputs( UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ ) # We need to order the input in the way they appears in the forward() lowerCamelCase : int = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCamelCase , lowerCamelCase : Optional[int] = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase : List[Any] = seqlen + 2 lowerCamelCase : Any = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase : Union[str, Any] = [ (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers ) ] lowerCamelCase : str = common_inputs["attention_mask"] if self.use_past: lowerCamelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype lowerCamelCase : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 ) return ordered_inputs @property def _lowercase ( self ) -> int: return 13
48
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any: lowerCamelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[int] = "" else: lowerCamelCase : List[str] = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Any = in_proj_bias[: config.hidden_size] lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = val def A ( ) -> List[str]: lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = DeiTConfig() # all deit models have fine-tuned heads lowerCamelCase : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowerCamelCase : Dict = 1000 lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Dict = int(deit_name[-6:-4] ) lowerCamelCase : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowerCamelCase : Optional[Any] = 192 lowerCamelCase : List[str] = 768 lowerCamelCase : Tuple = 12 lowerCamelCase : Optional[Any] = 3 elif deit_name[9:].startswith("small" ): lowerCamelCase : str = 384 lowerCamelCase : Optional[Any] = 1536 lowerCamelCase : Dict = 12 lowerCamelCase : Optional[int] = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): lowerCamelCase : str = 1024 lowerCamelCase : List[str] = 4096 lowerCamelCase : Any = 24 lowerCamelCase : Dict = 16 # load original model from timm lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase : Dict = timm_model.state_dict() lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # load HuggingFace model lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by DeiTImageProcessor lowerCamelCase : Any = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size ) lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" ) lowerCamelCase : int = encoding["pixel_values"] lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
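# Illustrative invocation of the conversion script above (script filename as laid
# out in the transformers repo; the output directory is hypothetical; --deit_name
# matches the argparse default):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224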
from math import pi


def arc_length(angle: float, radius: float) -> float:
    # An arc of `angle` degrees covers angle/360 of the full circumference 2*pi*radius.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
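# Quick sanity check (values worked out by hand, not taken from the source): a
# 90-degree arc of a radius-10 circle is a quarter of the circumference, 5*pi ~= 15.708.
import math

assert math.isclose(arc_length(90, 10), 5 * math.pi)
assert math.isclose(arc_length(360, 1), 2 * math.pi)  # full circle == circumference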
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
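# Hedged usage sketch for the package initialised above ("rotten_tomatoes" is just
# an illustrative public dataset id):
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train")
print(ds[0])        # one example as a plain dict
print(ds.features)  # the typed schema declared by the dataset builder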
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        # Sum of array[start..end] in O(1) using the precomputed prefix sums.
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_of_subarray(self, target_sum: int) -> bool:
        # A contiguous subarray sums to target_sum iff two prefix sums differ by it.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
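# Minimal usage sketch (example values worked out by hand, not from the source):
ps = PrefixSum([1, 2, 3, 4])
assert ps.prefix_sum == [1, 3, 6, 10]
assert ps.get_sum(1, 3) == 9            # 2 + 3 + 4
assert ps.contains_sum_of_subarray(5)   # 2 + 3
assert not ps.contains_sum_of_subarray(100)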
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier chases the other's most recent node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
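# Hedged usage sketch: search the module-level `grid` from the top-left corner to
# the bottom-right corner. Coordinates are (y, x) tuples, as in the file above.
bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
path = bd_bfs.search()
print(path)  # list of (y, x) cells from start to goal, or [start] if unreachable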
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
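# Hedged usage sketch (the ciphertext is illustrative): decode a shift-13 message
# and report the recovered shift alongside the plaintext.
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared(
    "uryyb guvf vf n grfg zrffntr"
)
print(shift, decoded)  # expected: 13 and "hello this is a test message"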
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu _A = [ 'EAGER', 'AOT_EAGER', 'INDUCTOR', 'NVFUSER', 'AOT_NVFUSER', 'AOT_CUDAGRAPHS', 'OFI', 'FX2TRT', 'ONNXRT', 'IPEX', ] def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : str=None ): __UpperCamelCase =True while ask_again: __UpperCamelCase =input(SCREAMING_SNAKE_CASE__ ) try: if default is not None and len(SCREAMING_SNAKE_CASE__ ) == 0: return default return convert_value(SCREAMING_SNAKE_CASE__ ) if convert_value is not None else result except Exception: if error_message is not None: print(SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str]=[] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : str=0 ): __UpperCamelCase =BulletMenu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =menu.run(default_choice=SCREAMING_SNAKE_CASE__ ) return convert_value(SCREAMING_SNAKE_CASE__ ) if convert_value is not None else result def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ ) return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ): __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ ) return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ): __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ): __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ ) return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ ) return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ): return {"yes": True, "no": False}[value.lower()] class UpperCAmelCase__ ( argparse.RawDescriptionHelpFormatter ): """simple docstring""" def _a ( self , A_ , A_ , A_ , A_ ) -> Optional[int]: __UpperCamelCase =super()._format_usage(A_ , A_ , A_ , A_ ) __UpperCamelCase =usage.replace('<command> [<args>] ' , '' ) return usage
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated lowerCamelCase__ : str = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ lowerCamelCase__ : Union[str, Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def UpperCamelCase ( _lowerCAmelCase : List[str] ) -> Optional[Any]: _UpperCAmelCase : str = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" ) return numpy.frombuffer(bytestream.read(4 ), dtype=_lowerCAmelCase )[0] @deprecated(_lowerCAmelCase, """Please use tf.data to implement this functionality.""" ) def UpperCamelCase ( _lowerCAmelCase : int ) -> Optional[Any]: print("""Extracting""", f.name ) with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream: _UpperCAmelCase : Tuple = _readaa(_lowerCAmelCase ) if magic != 2051: raise ValueError( """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) ) _UpperCAmelCase : Optional[int] = _readaa(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = _readaa(_lowerCAmelCase ) _UpperCAmelCase : List[str] = _readaa(_lowerCAmelCase ) _UpperCAmelCase : List[str] = bytestream.read(rows * cols * num_images ) _UpperCAmelCase : Optional[Any] = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta ) _UpperCAmelCase : Any = data.reshape(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, 1 ) return data @deprecated(_lowerCAmelCase, """Please use tf.one_hot on tensors.""" ) def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple ) -> Union[str, Any]: _UpperCAmelCase : int = labels_dense.shape[0] _UpperCAmelCase : Any = numpy.arange(_lowerCAmelCase ) * num_classes _UpperCAmelCase : Tuple = numpy.zeros((num_labels, num_classes) ) _UpperCAmelCase : Dict = 1 return labels_one_hot @deprecated(_lowerCAmelCase, """Please use tf.data to implement this functionality.""" ) def UpperCamelCase ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Optional[Any]=10 ) -> Union[str, Any]: print("""Extracting""", f.name ) with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream: _UpperCAmelCase : Tuple = _readaa(_lowerCAmelCase ) if magic != 2049: raise ValueError( """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) ) _UpperCAmelCase : str = _readaa(_lowerCAmelCase ) _UpperCAmelCase : Dict = bytestream.read(_lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(_lowerCAmelCase, _lowerCAmelCase ) return labels class _UpperCAmelCase : @deprecated( _A , """Please use alternatives such as official/mnist/_DataSet.py""" """ from tensorflow/models.""" , ) def __init__( self , _A , _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=None , ) -> str: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : int = random_seed.get_seed(_A ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) _UpperCAmelCase : Tuple = dtypes.as_dtype(_A ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype ) if fake_data: _UpperCAmelCase : Union[str, Any] = 1_00_00 _UpperCAmelCase : Union[str, Any] = one_hot else: assert ( images.shape[0] == labels.shape[0] ), 
f'''images.shape: {images.shape} labels.shape: {labels.shape}''' _UpperCAmelCase : Any = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 _UpperCAmelCase : int = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. _UpperCAmelCase : Dict = images.astype(numpy.floataa ) _UpperCAmelCase : Any = numpy.multiply(_A , 1.0 / 255.0 ) _UpperCAmelCase : Union[str, Any] = images _UpperCAmelCase : List[Any] = labels _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : Optional[Any] = 0 @property def __snake_case ( self ) -> Optional[int]: '''simple docstring''' return self._images @property def __snake_case ( self ) -> Any: '''simple docstring''' return self._labels @property def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' return self._num_examples @property def __snake_case ( self ) -> Optional[int]: '''simple docstring''' return self._epochs_completed def __snake_case ( self , _A , _A=False , _A=True ) -> Tuple: '''simple docstring''' if fake_data: _UpperCAmelCase : int = [1] * 7_84 _UpperCAmelCase : str = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(_A )], [fake_label for _ in range(_A )], ) _UpperCAmelCase : Tuple = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: _UpperCAmelCase : str = numpy.arange(self._num_examples ) numpy.random.shuffle(_A ) _UpperCAmelCase : List[Any] = self.images[perma] _UpperCAmelCase : Union[str, Any] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch _UpperCAmelCase : List[Any] = self._num_examples - start _UpperCAmelCase : str = self._images[start : self._num_examples] _UpperCAmelCase : List[str] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: _UpperCAmelCase : str = numpy.arange(self._num_examples ) numpy.random.shuffle(_A ) _UpperCAmelCase : Optional[int] = self.images[perm] _UpperCAmelCase : str = self.labels[perm] # Start next epoch _UpperCAmelCase : List[Any] = 0 _UpperCAmelCase : Tuple = batch_size - rest_num_examples _UpperCAmelCase : Union[str, Any] = self._index_in_epoch _UpperCAmelCase : Optional[int] = self._images[start:end] _UpperCAmelCase : str = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size _UpperCAmelCase : List[Any] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(_lowerCAmelCase, """Please write your own downloading logic.""" ) def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: if not gfile.Exists(_lowerCAmelCase ): gfile.MakeDirs(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = os.path.join(_lowerCAmelCase, _lowerCAmelCase ) if not gfile.Exists(_lowerCAmelCase ): urllib.request.urlretrieve(_lowerCAmelCase, _lowerCAmelCase ) # noqa: S310 with gfile.GFile(_lowerCAmelCase ) as f: _UpperCAmelCase : Optional[int] = f.size() print("""Successfully downloaded""", _lowerCAmelCase, _lowerCAmelCase, """bytes.""" ) return filepath @deprecated( _lowerCAmelCase, """Please use alternatives such as:""" """ 
tensorflow_datasets.load('mnist')""" ) def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : str=False, _lowerCAmelCase : List[str]=False, _lowerCAmelCase : Tuple=dtypes.floataa, _lowerCAmelCase : List[str]=True, _lowerCAmelCase : Union[str, Any]=5000, _lowerCAmelCase : Optional[Any]=None, _lowerCAmelCase : int=DEFAULT_SOURCE_URL, ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [], [], fake_data=_lowerCAmelCase, one_hot=_lowerCAmelCase, dtype=_lowerCAmelCase, seed=_lowerCAmelCase ) _UpperCAmelCase : List[Any] = fake() _UpperCAmelCase : int = fake() _UpperCAmelCase : Any = fake() return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase ) if not source_url: # empty string check _UpperCAmelCase : Optional[Any] = DEFAULT_SOURCE_URL _UpperCAmelCase : Optional[int] = """train-images-idx3-ubyte.gz""" _UpperCAmelCase : int = """train-labels-idx1-ubyte.gz""" _UpperCAmelCase : Optional[Any] = """t10k-images-idx3-ubyte.gz""" _UpperCAmelCase : Tuple = """t10k-labels-idx1-ubyte.gz""" _UpperCAmelCase : Tuple = _maybe_download( _lowerCAmelCase, _lowerCAmelCase, source_url + train_images_file ) with gfile.Open(_lowerCAmelCase, """rb""" ) as f: _UpperCAmelCase : Optional[int] = _extract_images(_lowerCAmelCase ) _UpperCAmelCase : Any = _maybe_download( _lowerCAmelCase, _lowerCAmelCase, source_url + train_labels_file ) with gfile.Open(_lowerCAmelCase, """rb""" ) as f: _UpperCAmelCase : Optional[int] = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = _maybe_download( _lowerCAmelCase, _lowerCAmelCase, source_url + test_images_file ) with gfile.Open(_lowerCAmelCase, """rb""" ) as f: _UpperCAmelCase : Union[str, Any] = _extract_images(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = _maybe_download( _lowerCAmelCase, _lowerCAmelCase, source_url + test_labels_file ) with gfile.Open(_lowerCAmelCase, """rb""" ) as f: _UpperCAmelCase : List[Any] = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase ) if not 0 <= validation_size <= len(_lowerCAmelCase ): _UpperCAmelCase : int = ( """Validation size should be between 0 and """ f'''{len(_lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(_lowerCAmelCase ) _UpperCAmelCase : str = train_images[:validation_size] _UpperCAmelCase : Union[str, Any] = train_labels[:validation_size] _UpperCAmelCase : Optional[Any] = train_images[validation_size:] _UpperCAmelCase : Optional[int] = train_labels[validation_size:] _UpperCAmelCase : Optional[int] = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed} _UpperCAmelCase : Tuple = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase ) _UpperCAmelCase : Dict = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase ) _UpperCAmelCase : List[Any] = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase ) return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase )
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : Dict ) -> str: _UpperCAmelCase : Union[str, Any] = OmegaConf.load(_lowerCAmelCase ) _UpperCAmelCase : str = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""] _UpperCAmelCase : Dict = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : List[str] = {} _UpperCAmelCase : List[str] = """first_stage_model.""" for key in keys: if key.startswith(_lowerCAmelCase ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : str = {} _UpperCAmelCase : Tuple = """model.diffusion_model.""" for key in keys: if key.startswith(_lowerCAmelCase ): _UpperCAmelCase : Tuple = state_dict[key] _UpperCAmelCase : Optional[Any] = config.model.params.first_stage_config.params _UpperCAmelCase : Optional[Any] = config.model.params.unet_config.params _UpperCAmelCase : List[str] = VQModel(**_lowerCAmelCase ).eval() vqvae.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase : List[Any] = UNetLDMModel(**_lowerCAmelCase ).eval() unet.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = DDIMScheduler( timesteps=config.model.params.timesteps, beta_schedule="""scaled_linear""", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=_lowerCAmelCase, ) _UpperCAmelCase : Tuple = LDMPipeline(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) pipeline.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', type=str, required=True) parser.add_argument('''--config_path''', type=str, required=True) parser.add_argument('''--output_path''', type=str, required=True) lowerCamelCase__ : List[str] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger lowercase_ = get_logger(__name__) lowercase_ = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class A_ : '''simple docstring''' @add_start_docstrings(lowercase_ ) def __call__( self: Union[str, Any] , a: jnp.ndarray , a: jnp.ndarray ): raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class A_ : '''simple docstring''' @add_start_docstrings(lowercase_ ) def __call__( self: Optional[int] , a: jnp.ndarray , a: jnp.ndarray ): raise NotImplementedError( F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class A_ ( __UpperCamelCase ): '''simple docstring''' @add_start_docstrings(lowercase_ ) def __call__( self: Union[str, Any] , a: jnp.ndarray , a: jnp.ndarray , a: int , **a: List[Any] ): for processor in self: __lowerCamelCase : Optional[int] = inspect.signature(processor.__call__ ).parameters if len(lowercase_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F'Make sure that all the required parameters: {list(function_args.keys() )} for ' F'{processor.__class__} are passed to the logits processor.' 
) __lowerCamelCase : Tuple = processor(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) else: __lowerCamelCase : Optional[Any] = processor(lowercase_ , lowercase_ , lowercase_ ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[int] , a: float ): if not isinstance(lowercase_ , lowercase_ ) or not (temperature > 0): raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' ) __lowerCamelCase : List[Any] = temperature def __call__( self: Tuple , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase : Dict = scores / self.temperature return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[Any] , a: float , a: float = -float('Inf' ) , a: int = 1 ): if not isinstance(lowercase_ , lowercase_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(lowercase_ , lowercase_ ) or (min_tokens_to_keep < 1): raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) __lowerCamelCase : Tuple = top_p __lowerCamelCase : List[str] = filter_value __lowerCamelCase : Union[str, Any] = min_tokens_to_keep def __call__( self: Optional[Any] , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase , __lowerCamelCase : List[Any] = lax.top_k(lowercase_ , scores.shape[-1] ) __lowerCamelCase : Union[str, Any] = jnp.full_like(lowercase_ , self.filter_value ) __lowerCamelCase : Union[str, Any] = jax.nn.softmax(lowercase_ , axis=-1 ).cumsum(axis=-1 ) __lowerCamelCase : List[Any] = cumulative_probs < self.top_p # include the token that is higher than top_p as well __lowerCamelCase : List[Any] = jnp.roll(lowercase_ , 1 ) score_mask |= score_mask.at[:, 0].set(lowercase_ ) # min tokens to keep __lowerCamelCase : int = score_mask.at[:, : self.min_tokens_to_keep].set(lowercase_ ) __lowerCamelCase : Tuple = jnp.where(lowercase_ , lowercase_ , lowercase_ ) __lowerCamelCase : Optional[Any] = jax.lax.sort_key_val(lowercase_ , lowercase_ )[-1] return next_scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Any , a: int , a: float = -float('Inf' ) , a: int = 1 ): if not isinstance(lowercase_ , lowercase_ ) or top_k <= 0: raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' ) __lowerCamelCase : Tuple = max(lowercase_ , lowercase_ ) __lowerCamelCase : int = filter_value def __call__( self: str , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase , __lowerCamelCase : List[Any] = scores.shape __lowerCamelCase : List[str] = jnp.full(batch_size * vocab_size , self.filter_value ) __lowerCamelCase : int = min(self.top_k , scores.shape[-1] ) # Safety check __lowerCamelCase , __lowerCamelCase : int = lax.top_k(lowercase_ , lowercase_ ) __lowerCamelCase : Optional[int] = jnp.broadcast_to((jnp.arange(lowercase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() __lowerCamelCase : List[Any] = topk_scores.flatten() __lowerCamelCase : Union[str, Any] = topk_indices.flatten() + shift __lowerCamelCase : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(lowercase_ ) __lowerCamelCase : List[str] = next_scores_flat.reshape(lowercase_ , lowercase_ ) return next_scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[Any] , a: int ): __lowerCamelCase : Any = bos_token_id def __call__( self: int , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase : List[str] = jnp.full(scores.shape 
, -float('inf' ) ) __lowerCamelCase : List[str] = 1 - jnp.bool_(cur_len - 1 ) __lowerCamelCase : Union[str, Any] = jnp.where(lowercase_ , new_scores.at[:, self.bos_token_id].set(0 ) , lowercase_ ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[Any] , a: int , a: int ): __lowerCamelCase : Optional[Any] = max_length __lowerCamelCase : List[Any] = eos_token_id def __call__( self: Tuple , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase : List[str] = jnp.full(scores.shape , -float('inf' ) ) __lowerCamelCase : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) __lowerCamelCase : str = jnp.where(lowercase_ , new_scores.at[:, self.eos_token_id].set(0 ) , lowercase_ ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Union[str, Any] , a: int , a: int ): if not isinstance(lowercase_ , lowercase_ ) or min_length < 0: raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' ) if not isinstance(lowercase_ , lowercase_ ) or eos_token_id < 0: raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) __lowerCamelCase : List[str] = min_length __lowerCamelCase : int = eos_token_id def __call__( self: str , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase : List[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) __lowerCamelCase : Tuple = jnp.where(lowercase_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , lowercase_ ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: List[str] , a: Optional[int] , a: Optional[Any] ): __lowerCamelCase : Any = list(lowercase_ ) __lowerCamelCase : List[Any] = begin_index def __call__( self: Any , a: Optional[Any] , a: Optional[Any] , a: int ): __lowerCamelCase : Optional[Any] = 1 - jnp.bool_(cur_len - self.begin_index ) __lowerCamelCase : Any = jnp.where(lowercase_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , lowercase_ ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[Any] , a: list ): __lowerCamelCase : List[Any] = list(lowercase_ ) def __call__( self: Union[str, Any] , a: jnp.ndarray , a: jnp.ndarray , a: int ): __lowerCamelCase : Dict = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Any , a: Union[str, Any] ): __lowerCamelCase : List[Any] = dict(lowercase_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
__lowerCamelCase : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: __lowerCamelCase : Optional[int] = force_token_array.at[index].set(lowercase_ ) __lowerCamelCase : List[str] = jnp.intaa(lowercase_ ) def __call__( self: Dict , a: jnp.ndarray , a: jnp.ndarray , a: int ): def _force_token(a: int ): __lowerCamelCase : Tuple = scores.shape[0] __lowerCamelCase : List[str] = self.force_token_array[generation_idx] __lowerCamelCase : str = jnp.ones_like(lowercase_ , dtype=scores.dtype ) * -float('inf' ) __lowerCamelCase : Dict = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) __lowerCamelCase : Optional[Any] = lax.dynamic_update_slice(lowercase_ , lowercase_ , (0, current_token) ) return new_scores __lowerCamelCase : Optional[Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase_ ) , lambda: scores , ) , ) return scores class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Dict , a: List[Any] , a: List[Any] , a: List[str] ): __lowerCamelCase : Optional[Any] = generate_config.eos_token_id __lowerCamelCase : Optional[Any] = generate_config.no_timestamps_token_id __lowerCamelCase : Dict = generate_config.no_timestamps_token_id + 1 __lowerCamelCase : Union[str, Any] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(lowercase_ , 'max_initial_timestamp_index' ): __lowerCamelCase : str = generate_config.max_initial_timestamp_index else: __lowerCamelCase : Union[str, Any] = model_config.vocab_size if self.max_initial_timestamp_index is None: __lowerCamelCase : Any = model_config.vocab_size def __call__( self: Tuple , a: Tuple , a: Tuple , a: Optional[Any] ): __lowerCamelCase : str = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(a: Optional[int] , a: Union[str, Any] ): __lowerCamelCase : Union[str, Any] = jnp.where((cur_len - self.begin_index) >= 1 , lowercase_ , lowercase_ ) __lowerCamelCase : str = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowercase_ , ) __lowerCamelCase : Optional[Any] = jnp.where((cur_len - self.begin_index) < 2 , lowercase_ , lowercase_ ) __lowerCamelCase : str = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase_ , lowercase_ , ) return jnp.where( lowercase_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , lowercase_ , ) __lowerCamelCase : int = jax.vmap(lowercase_ )(lowercase_ , lowercase_ ) __lowerCamelCase : Tuple = jnp.where(cur_len == self.begin_index , lowercase_ , lowercase_ ) __lowerCamelCase : str = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowercase_ , ) __lowerCamelCase : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index __lowerCamelCase : Optional[int] = jnp.where( lowercase_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , lowercase_ , ) # if sum of probability over timestamps is above any other token, sample timestamp __lowerCamelCase : Union[str, Any] = jax.nn.log_softmax(lowercase_ , axis=-1 ) def handle_cumulative_probs(a: List[Any] , a: List[str] ): __lowerCamelCase : List[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) 
__lowerCamelCase : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , lowercase_ , ) __lowerCamelCase : Any = jax.vmap(lowercase_ )(lowercase_ , lowercase_ ) return scores
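# Hedged usage sketch: the classes above are transformers' Flax logits processors
# (their names were all mangled to `A_` in this dump). Using the upstream names
# and an illustrative vocab size, compose two of them and apply them to a batch:
import jax.numpy as jnp

from transformers.generation.flax_logits_process import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
scores = jnp.zeros((1, 32000))
scores = processors(input_ids, scores, cur_len=4)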
def count_inversions_bf(arr):
    # Brute force: check every pair, O(n^2).
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    # Divide and conquer (merge-sort style), O(n log n).
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
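# Quick hedged check (array and count worked out by hand, not from the source):
_, inv = count_inversions_recursive([3, 1, 2])
assert inv == count_inversions_bf([3, 1, 2]) == 2  # the pairs (3, 1) and (3, 2)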
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
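# Hedged usage sketch: instantiate the config and inspect the derived fields.
config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_size)   # 96 * 2**3 = 768, the channel dim after the last stage
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']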
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    # Probabilistic (Miller-Rabin style) primality test with `prec` random rounds.
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
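# The sibling module `binary_exp_mod` is not included in this dump. A minimal
# square-and-multiply sketch with the assumed (base, exponent, modulus) signature:
def bin_exp_mod(a: int, n: int, b: int) -> int:
    # Compute (a ** n) % b in O(log n) multiplications.
    if n == 0:
        return 1
    if n % 2 == 1:
        return (bin_exp_mod(a, n - 1, b) * a) % b
    r = bin_exp_mod(a, n // 2, b)
    return (r * r) % b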
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
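# Illustrative invocation of the script above (all paths hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin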
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )-> str: lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =scope def _snake_case ( self )-> int: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ =None if self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self )-> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def _snake_case ( self )-> Tuple: ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) =self.prepare_config_and_inputs() lowerCamelCase_ =True lowerCamelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]: lowerCamelCase_ =NezhaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )-> Tuple: lowerCamelCase_ =True lowerCamelCase_ =NezhaModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =NezhaForMaskedLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =NezhaForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model( 
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int: lowerCamelCase_ =NezhaForPreTraining(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict: lowerCamelCase_ =NezhaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int: lowerCamelCase_ =self.num_labels lowerCamelCase_ =NezhaForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Dict: lowerCamelCase_ =self.num_labels lowerCamelCase_ =NezhaForTokenClassification(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =self.num_choices lowerCamelCase_ =NezhaForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ =model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , 
token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self )-> List[str]: lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) =config_and_inputs lowerCamelCase_ ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase): _UpperCamelCase:Optional[int] = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) _UpperCamelCase:int = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase:Tuple = True def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> Optional[Any]: lowerCamelCase_ =super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(_SCREAMING_SNAKE_CASE ): lowerCamelCase_ =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def _snake_case ( self )-> Dict: lowerCamelCase_ =NezhaModelTester(self ) lowerCamelCase_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def _snake_case ( self )-> List[str]: self.config_tester.run_common_tests() def _snake_case ( self )-> str: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) =self.model_tester.prepare_config_and_inputs_for_decoder() lowerCamelCase_ =None self.model_tester.create_and_check_model_as_decoder( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) def _snake_case ( self )-> Dict: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> List[Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> List[Any]: 
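        # Next-sentence prediction: the model tester above checks for (batch_size, 2) relationship logits.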
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Union[str, Any]: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Tuple: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )-> Any: lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )-> Union[str, Any]: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =NezhaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @slow @require_torch_gpu def _snake_case ( self )-> Any: lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.jit.trace( _SCREAMING_SNAKE_CASE , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """bert.pt""" ) ) lowerCamelCase_ =torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , """bert.pt""" ) , map_location=_SCREAMING_SNAKE_CASE ) loaded(inputs_dict["""input_ids"""].to(_SCREAMING_SNAKE_CASE ) , inputs_dict["""attention_mask"""].to(_SCREAMING_SNAKE_CASE ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase): @slow def _snake_case ( self )-> Dict: lowerCamelCase_ =NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) lowerCamelCase_ =torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ =torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0] lowerCamelCase_ =torch.Size((1, 6, 768) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def _snake_case ( self )-> Optional[Any]: lowerCamelCase_ =NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) lowerCamelCase_ =torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ =torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ =model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0] lowerCamelCase_ =torch.Size((1, 6, 2_1128) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =torch.tensor( [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
49
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


a : Tuple = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a : Tuple = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    for param in module.parameters():
        UpperCAmelCase : Any = False


def lowercase ( ):
    '''simple docstring'''
    UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        UpperCAmelCase : int = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    UpperCAmelCase : str = plt.imshow(__magic_name__ )
    fig.axes.get_xaxis().set_visible(__magic_name__ )
    fig.axes.get_yaxis().set_visible(__magic_name__ )
    plt.show()


def lowercase ( ):
    '''simple docstring'''
    UpperCAmelCase : str = datetime.now()
    UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
    return timestamp
311
1
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) lowerCAmelCase__ : Optional[int] =logging.getLogger(__name__) lowerCAmelCase__ : str ='''Hello world! cécé herlolip''' lowerCAmelCase__ : int =namedtuple( '''BertAbsConfig''', [ '''temp_dir''', '''large''', '''use_bert_emb''', '''finetune_bert''', '''encoder''', '''share_emb''', '''max_pos''', '''enc_layers''', '''enc_hidden_size''', '''enc_heads''', '''enc_ff_size''', '''enc_dropout''', '''dec_layers''', '''dec_hidden_size''', '''dec_heads''', '''dec_ff_size''', '''dec_dropout''', ], ) def __lowercase ( a__ , a__ ) -> Optional[int]: __SCREAMING_SNAKE_CASE = BertAbsConfig( temp_dir='.' , finetune_bert=a__ , large=a__ , share_emb=a__ , use_bert_emb=a__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) __SCREAMING_SNAKE_CASE = torch.load(a__ , lambda a__ , a__ : storage ) __SCREAMING_SNAKE_CASE = AbsSummarizer(a__ , torch.device('cpu' ) , a__ ) original.eval() __SCREAMING_SNAKE_CASE = BertAbsSummarizer(a__ , torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) __SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs __SCREAMING_SNAKE_CASE = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(a__ )) ) __SCREAMING_SNAKE_CASE = torch.tensor(a__ ).unsqueeze(0 ) __SCREAMING_SNAKE_CASE = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(a__ )) ) __SCREAMING_SNAKE_CASE = torch.tensor(a__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __SCREAMING_SNAKE_CASE = encoder_input_ids __SCREAMING_SNAKE_CASE = decoder_input_ids __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
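    # The comparison below therefore runs both generators directly on the raw decoder output.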
    # We make sure that the outputs of the full stack are identical
    __SCREAMING_SNAKE_CASE = original(a__ , a__ , a__ , a__ , a__ , a__ , a__ )[0]
    __SCREAMING_SNAKE_CASE = original.generator(a__ )
    __SCREAMING_SNAKE_CASE = new_model(
        a__ , a__ , a__ , a__ , a__ )[0]
    __SCREAMING_SNAKE_CASE = new_model.generator(a__ )

    __SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between outputs: {:.2f}'.format(a__ ) )
    __SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between outputs: {:.2f}'.format(a__ ) )

    __SCREAMING_SNAKE_CASE = torch.allclose(a__ , a__ , atol=1E-3 )
    if are_identical:
        logging.info('all weights are equal up to 1e-3' )
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.' )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('saving the model\'s state dictionary' )
    torch.save(
        new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )


if __name__ == "__main__":
    lowerCAmelCase__ : List[str] =argparse.ArgumentParser()
    parser.add_argument(
        '''--bertabs_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the official PyTorch dump.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the output PyTorch model.''',
    )
    lowerCAmelCase__ : Tuple =parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
118
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__) def __lowercase ( a__ , a__=False ) -> Tuple: __SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'deit.embeddings.cls_token'), ('dist_token', 'deit.embeddings.distillation_token'), ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'deit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" __SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('norm.weight', 'deit.layernorm.weight'), ('norm.bias', 'deit.layernorm.bias'), ('head.weight', 'cls_classifier.weight'), ('head.bias', 'cls_classifier.bias'), ('head_dist.weight', 'distillation_classifier.weight'), ('head_dist.bias', 'distillation_classifier.bias'), ] ) return rename_keys def __lowercase ( a__ , a__ , a__=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: __SCREAMING_SNAKE_CASE = '' else: __SCREAMING_SNAKE_CASE = 'deit.' 
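        # timm stores the attention projection as one fused qkv tensor; the HF checkpoint expects separate query/key/value weights.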
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] __SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] __SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] __SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def __lowercase ( a__ , a__ , a__ ) -> str: __SCREAMING_SNAKE_CASE = dct.pop(a__ ) __SCREAMING_SNAKE_CASE = val def __lowercase ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg' __SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ) return im @torch.no_grad() def __lowercase ( a__ , a__ ) -> Dict: __SCREAMING_SNAKE_CASE = DeiTConfig() # all deit models have fine-tuned heads __SCREAMING_SNAKE_CASE = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size __SCREAMING_SNAKE_CASE = 10_00 __SCREAMING_SNAKE_CASE = 'huggingface/label-files' __SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json' __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) ) __SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = int(deit_name[-6:-4] ) __SCREAMING_SNAKE_CASE = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('tiny' ): __SCREAMING_SNAKE_CASE = 1_92 __SCREAMING_SNAKE_CASE = 7_68 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 3 elif deit_name[9:].startswith('small' ): __SCREAMING_SNAKE_CASE = 3_84 __SCREAMING_SNAKE_CASE = 15_36 __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 6 if deit_name[9:].startswith('base' ): pass elif deit_name[4:].startswith('large' ): __SCREAMING_SNAKE_CASE = 10_24 __SCREAMING_SNAKE_CASE = 40_96 __SCREAMING_SNAKE_CASE = 24 __SCREAMING_SNAKE_CASE = 16 # load original model from timm __SCREAMING_SNAKE_CASE = timm.create_model(a__ , pretrained=a__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys __SCREAMING_SNAKE_CASE = timm_model.state_dict() __SCREAMING_SNAKE_CASE = create_rename_keys(a__ , a__ ) for src, dest in rename_keys: rename_key(a__ , a__ , a__ ) read_in_q_k_v(a__ , a__ , a__ ) # load HuggingFace model __SCREAMING_SNAKE_CASE = DeiTForImageClassificationWithTeacher(a__ ).eval() model.load_state_dict(a__ ) # Check outputs on an image, prepared by DeiTImageProcessor __SCREAMING_SNAKE_CASE = int( (2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 
    # 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    __SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=a__ , crop_size=config.image_size )
    __SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors='pt' )
    __SCREAMING_SNAKE_CASE = encoding['pixel_values']
    __SCREAMING_SNAKE_CASE = model(a__ )

    __SCREAMING_SNAKE_CASE = timm_model(a__ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(a__ , outputs.logits , atol=1E-3 )

    Path(a__ ).mkdir(exist_ok=a__ )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(a__ )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(a__ )


if __name__ == "__main__":
    lowerCAmelCase__ : Union[str, Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--deit_name''',
        default='''vit_deit_base_distilled_patch16_224''',
        type=str,
        help='''Name of the DeiT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )

    lowerCAmelCase__ : str =parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
118
1
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase : def __init__( self , snake_case , snake_case=2 , snake_case=8 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=16 , snake_case=5 , snake_case=2 , snake_case=36 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def a ( self ): snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def a ( self ): snake_case_ = self.get_config() snake_case_ = 300 return config def a ( self ): ( snake_case_ ) = self.prepare_config_and_inputs() snake_case_ = True snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = 
MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) snake_case_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) snake_case_ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): snake_case_ = True snake_case_ = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) snake_case_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) snake_case_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = self.num_labels snake_case_ = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = self.num_labels snake_case_ = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): snake_case_ = self.num_choices snake_case_ = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 
).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a ( self ): snake_case_ = self.prepare_config_and_inputs() ( snake_case_ ) = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : int = () def a ( self ): snake_case_ = MraModelTester(self ) snake_case_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def a ( self ): self.config_tester.run_common_tests() def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def a ( self ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='MRA does not output attentions' ) def a ( self ): return @require_torch class lowercase ( unittest.TestCase ): @slow def a ( self ): snake_case_ = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) snake_case_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(__UpperCAmelCase )[0] snake_case_ = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) snake_case_ = torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def a ( self ): snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) snake_case_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(__UpperCAmelCase )[0] snake_case_ = 5_0265 snake_case_ = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) snake_case_ = torch.tensor( [[[9.25_95, -3.60_38, 
11.8819], [9.38_69, -3.26_93, 11.0956], [11.8524, -3.49_38, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def a ( self ): snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) snake_case_ = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(__UpperCAmelCase )[0] snake_case_ = 5_0265 snake_case_ = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) snake_case_ = torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
285
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : str = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Dict = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )

    @slow
    def UpperCAmelCase_ ( self ) -> int:
        lowerCAmelCase__ : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : Dict = torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )
37
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowercase = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ['''ReformerTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ['''ReformerTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ReformerAttention''',
        '''ReformerForMaskedLM''',
        '''ReformerForQuestionAnswering''',
        '''ReformerForSequenceClassification''',
        '''ReformerLayer''',
        '''ReformerModel''',
        '''ReformerModelWithLMHead''',
        '''ReformerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
369
from __future__ import annotations


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(SCREAMING_SNAKE_CASE ):
        print(f"""{i}\t\t{d}""" )


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    for j in range(SCREAMING_SNAKE_CASE ):
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False


def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    __UpperCamelCase :List[Any] = [float('''inf''' )] * vertex_count
    __UpperCamelCase :str = 0.0

    for _ in range(vertex_count - 1 ):
        for j in range(SCREAMING_SNAKE_CASE ):
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                __UpperCamelCase :Any = distance[u] + w

    __UpperCamelCase :Tuple = check_negative_cycle(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    __lowercase = int(input('''Enter number of vertices: ''').strip())
    __lowercase = int(input('''Enter number of edges: ''').strip())

    __lowercase = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        __lowercase , __lowercase , __lowercase = (
            int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        __lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}

    __lowercase = int(input('''\nEnter shortest path source:''').strip())
    __lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
105
0
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CodeGenTokenizer SCREAMING_SNAKE_CASE__ = CodeGenTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""add_prefix_space""": True} SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): if not self.test_rust_tokenizer: return UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """lower newer""" # Testing tokenization UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids without special tokens UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) 
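        # The Python and Rust tokenizers are expected to produce identical ids for the same input.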
UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids with special tokens UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing the unknown token UpperCamelCase__ = tokens + [rust_tokenizer.unk_token] UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # Simple input UpperCamelCase__ = """This is a simple input""" UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""] UpperCamelCase__ = ("""This is a simple input""", """This is a pair""") UpperCamelCase__ = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" , ) def UpperCAmelCase_ (self ): UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input UpperCamelCase__ = """This is a simple input""" UpperCamelCase__ = ["""This is a simple input looooooooong""", """This is a simple input"""] UpperCamelCase__ = ("""This is a simple input""", """This is a pair""") UpperCamelCase__ = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] UpperCamelCase__ = tokenizer.pad_token_id UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , 
return_tensors="""np""" ) UpperCamelCase__ = tokenizer(*SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncate=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def UpperCAmelCase_ (self ): UpperCamelCase__ = """$$$""" UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE_ , add_bos_token=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """This is a simple input""" UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""] UpperCamelCase__ = tokenizer.bos_token_id UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) UpperCamelCase__ = tokenizer.decode(out_s.input_ids ) UpperCamelCase__ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) UpperCamelCase__ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" UpperCamelCase__ = """\nif len_a > len_b: result = a\nelse: result = b""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] UpperCamelCase__ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , truncate_before_pattern=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass
244
import torch

from diffusers import StableDiffusionPipeline


lowerCamelCase_ = '''path-to-your-trained-model'''
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')

lowerCamelCase_ = '''A photo of sks dog in a bucket'''
lowerCamelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('''dog-bucket.png''')
244
1
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: __UpperCAmelCase =None try: import msvcrt except ImportError: __UpperCAmelCase =None try: import fcntl except ImportError: __UpperCAmelCase =None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __UpperCAmelCase =OSError # Data # ------------------------------------------------ __UpperCAmelCase =[ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] __UpperCAmelCase ="3.0.12" __UpperCAmelCase =None def __lowerCAmelCase ( ) -> Tuple: global _logger __lowerCamelCase = _logger or logging.getLogger(__name__ ) return _logger class a__ ( UpperCAmelCase__ ): def __init__( self : Dict , a : int ): """simple docstring""" __lowerCamelCase = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class a__ : def __init__( self : List[str] , a : str ): """simple docstring""" __lowerCamelCase = lock return None def __enter__( self : Dict ): """simple docstring""" return self.lock def __exit__( self : Optional[Any] , a : List[str] , a : int , a : str ): """simple docstring""" self.lock.release() return None class a__ : def __init__( self : Tuple , a : Union[str, Any] , a : Any=-1 , a : Optional[int]=None ): """simple docstring""" __lowerCamelCase = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long __lowerCamelCase = self.hash_filename_if_too_long(a , a ) # The path to the lock file. __lowerCamelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __lowerCamelCase = None # The default timeout value. __lowerCamelCase = timeout # We use this lock primarily for the lock counter. __lowerCamelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __lowerCamelCase = 0 return None @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return self._lock_file @property def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : str ): """simple docstring""" __lowerCamelCase = float(a ) return None def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError() def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" raise NotImplementedError() @property def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE__ ( self : int , a : Dict=None , a : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __lowerCamelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
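        # The counter makes the lock re-entrant: nested acquires on the same instance only bump the count.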
with self._thread_lock: self._lock_counter += 1 __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file __lowerCamelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(a ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __lowerCamelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE__ ( self : str , a : Optional[int]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __lowerCamelCase = id(self ) __lowerCamelCase = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __lowerCamelCase = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : List[str] ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , a : str , a : Dict , a : Any ): """simple docstring""" self.release() return None def __del__( self : Any ): """simple docstring""" self.release(force=a ) return None def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : str , a : int ): """simple docstring""" __lowerCamelCase = os.path.basename(a ) if len(a ) > max_length and max_length > 0: __lowerCamelCase = os.path.dirname(a ) __lowerCamelCase = str(hash(a ) ) __lowerCamelCase = filename[: max_length - len(a ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(a , a ) else: return path class a__ ( UpperCAmelCase__ ): def __init__( self : Union[str, Any] , a : Optional[int] , a : Optional[Any]=-1 , a : str=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(a , timeout=a , max_filename_length=a ) __lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , a ) except OSError: pass else: try: msvcrt.locking(a , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(a ) else: __lowerCamelCase = fd return None def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None msvcrt.locking(a , msvcrt.LK_UNLCK , 1 ) os.close(a ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class a__ ( UpperCAmelCase__ ): def __init__( self : Dict , a : List[Any] , a : int=-1 , a : Any=None ): """simple docstring""" __lowerCamelCase = os.statvfs(os.path.dirname(a ) ).f_namemax super().__init__(a , timeout=a , max_filename_length=a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC __lowerCamelCase = os.open(self._lock_file , a ) try: fcntl.flock(a , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(a ) else: __lowerCamelCase = fd return None def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = self._lock_file_fd __lowerCamelCase = None fcntl.flock(a , fcntl.LOCK_UN ) os.close(a ) return None class a__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __lowerCamelCase = os.open(self._lock_file , a ) except OSError: pass else: __lowerCamelCase = fd return None def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" os.close(self._lock_file_fd ) __lowerCamelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __UpperCAmelCase =None if msvcrt: __UpperCAmelCase =WindowsFileLock elif fcntl: __UpperCAmelCase =UnixFileLock else: __UpperCAmelCase =SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
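The module above is essentially vendored py-filelock 3.0.12 with mangled identifiers. A minimal usage sketch, assuming it is importable as `filelock` with the de-mangled names `FileLock` and `Timeout` (the lock path and timeout value are illustrative only):

from filelock import FileLock, Timeout  # assumed import path for the module above

lock = FileLock("shared_resource.txt.lock", timeout=1)
try:
    # __enter__ calls acquire(); the nested lock counter makes re-entry safe
    with lock:
        with open("shared_resource.txt", "a") as f:
            f.write("guarded write\n")
except Timeout:
    # acquire() raised because another process held the lock past the 1 s timeout
    print("Another process currently holds the lock.")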
237
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =LongformerTokenizer lowerCamelCase : Optional[Any] =True lowerCamelCase : List[str] =LongformerTokenizerFast lowerCamelCase : Union[str, Any] =True def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCamelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowerCamelCase = dict(zip(a , range(len(a ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a ) ) def SCREAMING_SNAKE_CASE__ ( self : int , **a : int ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **a ) def SCREAMING_SNAKE_CASE__ ( self : str , **a : Dict ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int ): """simple docstring""" __lowerCamelCase = '''lower newer''' __lowerCamelCase = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCamelCase = tokenizer.tokenize(a ) # , add_prefix_space=True) self.assertListEqual(a , a ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=a ) __lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a ) __lowerCamelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=a , add_prefix_space=a ) __lowerCamelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a , add_prefix_space=a ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = '''Encode this sequence.''' __lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(a , a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(a , a ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(a , a ) # Testing spaces after special tokens __lowerCamelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(a , lstrip=a , rstrip=a )} ) # mask token has a left space __lowerCamelCase = tokenizer.convert_tokens_to_ids(a ) __lowerCamelCase = '''Encode <mask> sequence''' __lowerCamelCase = '''Encode <mask>sequence''' __lowerCamelCase = tokenizer.encode(a ) __lowerCamelCase = encoded.index(a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(a , a ) __lowerCamelCase = tokenizer.encode(a ) __lowerCamelCase = encoded.index(a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a ) __lowerCamelCase = self.tokenizer_class.from_pretrained(a , **a ) __lowerCamelCase = '''A, <mask> AllenNLP sentence.''' __lowerCamelCase = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a ) __lowerCamelCase = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __lowerCamelCase = 
tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a ) self.assertEqual(post_processor_state['''add_prefix_space'''] , a ) self.assertEqual(post_processor_state['''trim_offsets'''] , a ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowerCamelCase = f"""{text_of_1_token} {text_of_1_token}""" __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , ) __lowerCamelCase = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + 
len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , ) __lowerCamelCase = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , add_prefix_space=a , trim_offsets=a ) __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
237
1
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count pairs of distinct primes p < q with p**q * q**p <= base**degree,
    compared in log2 space to avoid building huge integers."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
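This appears to be a Project Euler style solver (the 800_800 defaults suggest problem 800, which counts "hybrid integers" p**q * q**p below base**degree); that identification is an inference, not stated in the source. A quick invocation on a smaller, hypothetical bound:

# Illustrative smaller instance; the bare solution() call runs the full 800_800 case
print(solution(base=800, degree=1))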
95
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( 'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2']) parser.add_argument('--model_name', default='roberta-large', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = 'roberta' elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = 'transformer' a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[F"""{prefix}.{param_name}"""] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = F"""{prefix}.embeddings.{w}.weight""" a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = F"""{prefix}.embeddings.LayerNorm.{w}""" a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ F"""{prefix}.h.{teacher_idx}.{layer}.{w}""" ] a_ = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}""" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[F"""{layer}"""] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[F"""lm_head.dense.{w}"""] a_ = state_dict[F"""lm_head.layer_norm.{w}"""] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[F"""{prefix}.ln_f.{w}"""] a_ = state_dict['lm_head.weight'] print(F"""N layers selected for distillation: {std_idx}""") print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
175
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : int = logging.get_logger(__name__) _lowerCAmelCase : Dict = { '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''', } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''git_vision_model''' def __init__( self :Union[str, Any] , snake_case :str=768 , snake_case :str=3_072 , snake_case :Optional[Any]=12 , snake_case :Any=12 , snake_case :Dict=3 , snake_case :Union[str, Any]=224 , snake_case :Optional[int]=16 , snake_case :Union[str, Any]="quick_gelu" , snake_case :Optional[int]=1e-5 , snake_case :List[str]=0.0 , snake_case :Any=0.02 , **snake_case :str , ): '''simple docstring''' super().__init__(**snake_case ) A_ : Optional[int] = hidden_size A_ : Optional[Any] = intermediate_size A_ : Dict = num_hidden_layers A_ : int = num_attention_heads A_ : int = num_channels A_ : Tuple = patch_size A_ : Dict = image_size A_ : Optional[int] = initializer_range A_ : str = attention_dropout A_ : Tuple = layer_norm_eps A_ : List[str] = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls :Any , snake_case :Union[str, os.PathLike] , **snake_case :List[str] ): '''simple docstring''' cls._set_token_in_kwargs(snake_case ) A_ , A_ : Optional[Any] = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from GITConfig if config_dict.get("model_type" ) == "git": A_ : int = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case , **snake_case ) class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''git''' def __init__( self :List[str] , snake_case :Any=None , snake_case :int=30_522 , snake_case :Dict=768 , snake_case :List[Any]=6 , snake_case :Any=12 , snake_case :Any=3_072 , snake_case :List[Any]="gelu" , snake_case :Union[str, Any]=0.1 , snake_case :Any=0.1 , snake_case :Optional[int]=1_024 , snake_case :str=0.02 , snake_case :int=1e-12 , snake_case :Optional[int]=0 , snake_case :int="absolute" , snake_case :Tuple=True , snake_case :List[str]=False , snake_case :List[str]=101 , snake_case :int=102 , snake_case :str=None , **snake_case :List[Any] , ): '''simple docstring''' super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , pad_token_id=snake_case , **snake_case ) if vision_config is None: A_ : Union[str, Any] = {} logger.info("vision_config is None. initializing the GitVisionConfig with default values." 
) A_ : List[Any] = GitVisionConfig(**snake_case ) A_ : Optional[int] = vocab_size A_ : List[str] = hidden_size A_ : int = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : List[str] = hidden_act A_ : Dict = intermediate_size A_ : Tuple = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : List[str] = initializer_range A_ : int = layer_norm_eps A_ : Dict = position_embedding_type A_ : str = use_cache A_ : str = tie_word_embeddings A_ : Optional[Any] = num_image_with_embedding A_ : int = bos_token_id A_ : Optional[int] = eos_token_id def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' A_ : Tuple = copy.deepcopy(self.__dict__ ) A_ : Optional[int] = self.vision_config.to_dict() A_ : Optional[Any] = self.__class__.model_type return output
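Assuming these classes correspond to the de-mangled `GitConfig`/`GitVisionConfig` in transformers, a minimal construction sketch (the values shown are the defaults from the `__init__` signatures above):

from transformers import GitConfig, GitVisionConfig  # assumed de-mangled names

vision_config = GitVisionConfig(image_size=224, patch_size=16)
# GitConfig accepts the vision config as a plain dict and rebuilds it internally
config = GitConfig(vision_config=vision_config.to_dict())
assert config.vision_config.image_size == 224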
70
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCAmelCase : Dict = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Union[str, Any] = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" return F'gaussian_noise_s={seed}_shape={"_".join([str(__UpperCamelCase ) for s in shape] )}.npy' def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() def snake_case ( self , _snake_case=0 , _snake_case=(4, 4, 64, 64) , _snake_case=False ): """simple docstring""" _lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCamelCase , __UpperCamelCase ) ) , dtype=__UpperCamelCase ) return image def snake_case ( self , _snake_case=False , _snake_case="CompVis/stable-diffusion-v1-4" ): """simple docstring""" _lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _lowerCAmelCase = """bf16""" if fpaa else None _lowerCAmelCase , _lowerCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __UpperCamelCase , subfolder="""unet""" , dtype=__UpperCamelCase , revision=__UpperCamelCase ) return model, params def snake_case ( self , _snake_case=0 , _snake_case=(4, 77, 768) , _snake_case=False ): """simple docstring""" _lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__UpperCamelCase , __UpperCamelCase ) ) , dtype=__UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=__UpperCamelCase ) _lowerCAmelCase = self.get_latents(__UpperCamelCase , fpaa=__UpperCamelCase ) _lowerCAmelCase = self.get_encoder_hidden_states(__UpperCamelCase , fpaa=__UpperCamelCase ) _lowerCAmelCase = model.apply( {"""params""": params} , __UpperCamelCase , jnp.array(__UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCamelCase , ).sample assert sample.shape == latents.shape _lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _lowerCAmelCase = jnp.array(__UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , 
fpaa=__UpperCamelCase ) _lowerCAmelCase = self.get_latents(__UpperCamelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCamelCase ) _lowerCAmelCase = self.get_encoder_hidden_states(__UpperCamelCase , shape=(4, 77, 1024) , fpaa=__UpperCamelCase ) _lowerCAmelCase = model.apply( {"""params""": params} , __UpperCamelCase , jnp.array(__UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=__UpperCamelCase , ).sample assert sample.shape == latents.shape _lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _lowerCAmelCase = jnp.array(__UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-2 )
82
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output 
PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
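Worked calls for the three helpers above; the outputs follow directly from the string manipulation (e.g. -8 is 11000 in 5-bit two's complement):

print(logical_left_shift(0b1101, 1))    # '0b11010'
print(logical_right_shift(0b1101, 1))   # '0b110'
print(arithmetic_right_shift(-8, 1))    # '0b11100'  (sign bit replicated)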
347
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): UpperCamelCase__ = None UpperCamelCase__ = "utf-8" UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = True # deprecated UpperCamelCase__ = None # deprecated UpperCamelCase__ = 10 << 20 # 10MB UpperCamelCase__ = None class __lowerCAmelCase ( datasets.ArrowBasedBuilder ): UpperCamelCase__ = JsonConfig def lowerCamelCase__ ( self :str ): '''simple docstring''' if self.config.block_size is not None: logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" ) a = self.config.block_size if self.config.use_threads is not True: logger.warning( """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" ) if self.config.newlines_in_values is not None: raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" ) return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ): '''simple docstring''' if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): a = data_files if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] a = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): a = [files] a = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ): '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): a = self.config.features.arrow_schema.field(__magic_name__ ).type a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example a = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ): '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) # We keep only the field we are interested in a = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: a = dataset a = 
pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , """rb""" ) as f: a = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small a = max(self.config.chunksize // 32 , 16 << 10 ) a = ( self.config.encoding_errors if self.config.encoding_errors is not None else """strict""" ) while True: a = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" ) try: while True: try: a = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: a = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: a = set().union(*[row.keys() for row in dataset] ) a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} a = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' ) raise ValueError( F'Not able to read records in the JSON file at {file}. ' F'You should probably indicate the field of the JSON file containing your records. ' F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
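This builder is normally reached through the public `datasets` API rather than instantiated directly; a short sketch (file names are placeholders):

from datasets import load_dataset

# JSON Lines: one object per line, parsed in chunks by pyarrow as above
ds = load_dataset("json", data_files="data.jsonl", split="train")

# Single JSON document whose records live under a top-level key,
# exercising the `field` branch of _generate_tables()
ds = load_dataset("json", data_files="data.json", field="data", split="train")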
347
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase =[ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] __UpperCAmelCase =[ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def __lowerCAmelCase ( UpperCamelCase__ ) -> Optional[Any]: __lowerCamelCase = torch.load(A__ , map_location='''cpu''' ) return sd def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=rename_keys_prefix ) -> Dict: __lowerCamelCase = OrderedDict() __lowerCamelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __lowerCamelCase = key for name_pair in rename_keys_prefix: __lowerCamelCase = new_key.replace(name_pair[0] , name_pair[1] ) __lowerCamelCase = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __lowerCamelCase = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: __lowerCamelCase = """pretraining""" if "vcr" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 5_12} elif "vqa_advanced" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 20_48} elif "vqa" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 20_48} elif "nlvr" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 10_24} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 5_12} __lowerCamelCase = """multichoice""" elif "vqa_advanced" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 20_48} __lowerCamelCase = """vqa_advanced""" elif "vqa" in checkpoint_path: __lowerCamelCase = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29} __lowerCamelCase = """vqa""" elif "nlvr" in checkpoint_path: __lowerCamelCase = { """visual_embedding_dim""": 10_24, """num_labels""": 2, } __lowerCamelCase = """nlvr""" __lowerCamelCase = VisualBertConfig(**A__ ) # Load State Dict __lowerCamelCase = load_state_dict(A__ ) __lowerCamelCase = get_new_dict(A__ , A__ ) if model_type == "pretraining": __lowerCamelCase = VisualBertForPreTraining(A__ ) elif model_type == "vqa": __lowerCamelCase = VisualBertForQuestionAnswering(A__ ) elif model_type == "nlvr": __lowerCamelCase = VisualBertForVisualReasoning(A__ ) elif model_type == "multichoice": __lowerCamelCase = VisualBertForMultipleChoice(A__ ) model.load_state_dict(A__ ) # Save Checkpoints Path(A__ 
).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") __UpperCAmelCase =parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
67
'''simple docstring''' from sklearn.metrics import recall_score import datasets __A : Dict = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" __A : List[Any] = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" __A : str = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION) class __snake_case ( datasets.Metric): """simple docstring""" def __lowercase ( self : str ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def __lowercase ( self : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=1 , lowerCamelCase : Union[str, Any]="binary" , lowerCamelCase : Any=None , lowerCamelCase : str="warn" , ) -> List[Any]: lowerCAmelCase_ : Optional[int] = recall_score( lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase , zero_division=lowerCamelCase , ) return {"recall": float(lowerCamelCase ) if score.size == 1 else score}
120
0
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class UpperCAmelCase__ ( A__ ): """simple docstring""" def lowercase_ ( self : Any , __lowerCamelCase : str ) -> List[str]: with open(__lowerCamelCase , encoding='''utf-8''' ) as input_file: SCREAMING_SNAKE_CASE__ = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) SCREAMING_SNAKE_CASE__ = input_file.read() SCREAMING_SNAKE_CASE__ = regexp.search(__lowerCamelCase ) return match def lowercase_ ( self : Optional[Any] , __lowerCamelCase : str ) -> List[str]: with open(__lowerCamelCase , encoding='''utf-8''' ) as input_file: SCREAMING_SNAKE_CASE__ = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) SCREAMING_SNAKE_CASE__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` SCREAMING_SNAKE_CASE__ = regexp.finditer(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = Path('''./datasets''' ) SCREAMING_SNAKE_CASE__ = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCamelCase ) ): raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' ) def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = Path('''./datasets''' ) SCREAMING_SNAKE_CASE__ = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCamelCase ) ): raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
218
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
218
1
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
195
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
344
0
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
350
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
218
0
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
30
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = """AutoTokenizer""" def __init__( self , lowercase , lowercase , lowercase ): super().__init__(lowercase , lowercase ) # add QFormer tokenizer _lowerCamelCase : int = qformer_tokenizer def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify at least images or text.' ) _lowerCamelCase : int = BatchFeature() if text is not None: _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) encoding.update(lowercase ) _lowerCamelCase : List[str] = self.qformer_tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) _lowerCamelCase : List[Any] = qformer_text_encoding.pop('input_ids' ) _lowerCamelCase : Tuple = qformer_text_encoding.pop('attention_mask' ) if images is not None: _lowerCamelCase : int = self.image_processor(lowercase , return_tensors=lowercase ) encoding.update(lowercase ) return encoding def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A_ ( self , lowercase , **lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : Optional[Any] = os.path.join(lowercase , 'qformer_tokenizer' ) self.qformer_tokenizer.save_pretrained(lowercase ) return super().save_pretrained(lowercase , **lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): _lowerCamelCase : 
Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , subfolder='qformer_tokenizer' ) _lowerCamelCase : Dict = cls._get_arguments_from_pretrained(lowercase , **lowercase ) args.append(lowercase ) return cls(*lowercase )
96
0
import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )

        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])

        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])

        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
110
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
110
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ): _lowerCamelCase :Union[str, Any] = AltDiffusionPipeline _lowerCamelCase :List[str] = TEXT_TO_IMAGE_PARAMS _lowerCamelCase :Any = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase :Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowerCAmelCase__ : List[str] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , ) torch.manual_seed(0 ) lowerCAmelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ : Union[str, Any] = CLIPTextModel(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) lowerCAmelCase__ : List[str] = 77 lowerCAmelCase__ : Any = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowerCAmelCase ( self : Dict , UpperCamelCase : int , UpperCamelCase : Any=0 ) -> Any: """simple docstring""" if str(UpperCamelCase ).startswith("""mps""" ): lowerCAmelCase__ : Optional[int] = torch.manual_seed(UpperCamelCase ) else: lowerCAmelCase__ : Any = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": 
generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" lowerCAmelCase__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : List[Any] = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ : Optional[Any] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ : Optional[int] = RobertaSeriesModelWithTransformation(UpperCamelCase ) lowerCAmelCase__ : Tuple = text_encoder lowerCAmelCase__ : Dict = AltDiffusionPipeline(**UpperCamelCase ) lowerCAmelCase__ : Tuple = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowerCAmelCase__ : List[str] = self.get_dummy_inputs(UpperCamelCase ) lowerCAmelCase__ : List[Any] = """A photo of an astronaut""" lowerCAmelCase__ : int = alt_pipe(**UpperCamelCase ) lowerCAmelCase__ : Dict = output.images lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : List[str] = np.array( [0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : str = self.get_dummy_components() lowerCAmelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase ) torch.manual_seed(0 ) lowerCAmelCase__ : List[Any] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ : Optional[Any] = RobertaSeriesModelWithTransformation(UpperCamelCase ) lowerCAmelCase__ : List[str] = text_encoder lowerCAmelCase__ : Dict = AltDiffusionPipeline(**UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowerCAmelCase__ : Dict = self.get_dummy_inputs(UpperCamelCase ) lowerCAmelCase__ : List[str] = alt_pipe(**UpperCamelCase ) lowerCAmelCase__ : int = output.images lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Union[str, Any] = np.array( [0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" # make sure here that pndm scheduler skips prk lowerCAmelCase__ : str = 
AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = """A painting of a squirrel eating a burger""" lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase__ : Union[str, Any] = alt_pipe([prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) lowerCAmelCase__ : int = output.images lowerCAmelCase__ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ : Tuple = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) lowerCAmelCase__ : int = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=UpperCamelCase , safety_checker=UpperCamelCase ) lowerCAmelCase__ : Dict = alt_pipe.to(UpperCamelCase ) alt_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowerCAmelCase__ : Any = """A painting of a squirrel eating a burger""" lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase__ : str = alt_pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ) lowerCAmelCase__ : Optional[int] = output.images lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ : Any = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
242
"""simple docstring""" from itertools import product def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[int]: lowerCAmelCase__ : Union[str, Any] = sides_number lowerCAmelCase__ : Optional[int] = max_face_number * dice_number lowerCAmelCase__ : List[str] = [0] * (max_total + 1) lowerCAmelCase__ : Union[str, Any] = 1 lowerCAmelCase__ : Optional[int] = range(__UpperCAmelCase , max_face_number + 1 ) for dice_numbers in product(__UpperCAmelCase , repeat=__UpperCAmelCase ): lowerCAmelCase__ : str = sum(__UpperCAmelCase ) totals_frequencies[total] += 1 return totals_frequencies def lowercase_ ( ) -> float: lowerCAmelCase__ : Union[str, Any] = total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCAmelCase__ : Tuple = total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCAmelCase__ : str = 0 lowerCAmelCase__ : int = 9 lowerCAmelCase__ : Tuple = 4 * 9 lowerCAmelCase__ : Optional[int] = 6 for peter_total in range(__UpperCAmelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCAmelCase__ : Tuple = (4**9) * (6**6) lowerCAmelCase__ : Union[str, Any] = peter_wins_count / total_games_number lowerCAmelCase__ : Optional[int] = round(__UpperCAmelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
242
1
from manim import * class lowerCAmelCase_ ( lowerCamelCase__ ): def snake_case_ ( self ) -> int: UpperCamelCase : Dict = Rectangle(height=0.5, width=0.5 ) UpperCamelCase : Union[str, Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) UpperCamelCase : Optional[Any] = [mem.copy() for i in range(6 )] UpperCamelCase : Dict = [mem.copy() for i in range(6 )] UpperCamelCase : List[Any] = VGroup(*__snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : int = VGroup(*__snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : Union[str, Any] = VGroup(__snake_case, __snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : Any = Text('CPU', font_size=24 ) UpperCamelCase : int = Group(__snake_case, __snake_case ).arrange(__snake_case, buff=0.5, aligned_edge=__snake_case ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__snake_case ) UpperCamelCase : int = [mem.copy() for i in range(4 )] UpperCamelCase : str = VGroup(*__snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : List[Any] = Text('GPU', font_size=24 ) UpperCamelCase : Optional[int] = Group(__snake_case, __snake_case ).arrange(__snake_case, buff=0.5, aligned_edge=__snake_case ) gpu.move_to([-1, -1, 0] ) self.add(__snake_case ) UpperCamelCase : List[str] = [mem.copy() for i in range(6 )] UpperCamelCase : List[str] = VGroup(*__snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : int = Text('Model', font_size=24 ) UpperCamelCase : Dict = Group(__snake_case, __snake_case ).arrange(__snake_case, buff=0.5, aligned_edge=__snake_case ) model.move_to([3, -1.0, 0] ) self.add(__snake_case ) UpperCamelCase : Optional[Any] = [] for i, rect in enumerate(__snake_case ): rect.set_stroke(__snake_case ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) UpperCamelCase : Optional[int] = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case, opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__snake_case ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0], direction=__snake_case, buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1], direction=__snake_case, buff=0.0 ) self.add(__snake_case ) cpu_targs.append(__snake_case ) UpperCamelCase : List[Any] = [mem.copy() for i in range(6 )] UpperCamelCase : Optional[int] = VGroup(*__snake_case ).arrange(__snake_case, buff=0 ) UpperCamelCase : Tuple = Text('Loaded Checkpoint', font_size=24 ) UpperCamelCase : str = Group(__snake_case, __snake_case ).arrange(__snake_case, aligned_edge=__snake_case, buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) UpperCamelCase : int = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase : Union[str, Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, ) key_text.move_to([-5, 2.4, 0] ) self.add(__snake_case, __snake_case ) UpperCamelCase : Tuple = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18, ) blue_text.next_to(__snake_case, DOWN * 2.4, aligned_edge=key_text.get_left() ) UpperCamelCase : Optional[int] = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""", font_size=24, ) step_a.move_to([2, 2, 0] ) self.play(Write(__snake_case ), Write(__snake_case ) ) self.play(Write(__snake_case, run_time=1 ), Create(__snake_case, run_time=1 ) ) UpperCamelCase : List[str] = [] UpperCamelCase : 
Dict = [] for i, rect in enumerate(__snake_case ): UpperCamelCase : Optional[int] = fill.copy().set_fill(__snake_case, opacity=0.7 ) target.move_to(__snake_case ) first_animations.append(GrowFromCenter(__snake_case, run_time=1 ) ) UpperCamelCase : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__snake_case, run_time=1.5 ) ) self.play(*__snake_case ) self.play(*__snake_case ) self.wait()
358
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], ) -> List[str]: UpperCamelCase : Optional[int] = size if size is not None else {'height': 18, 'width': 18} UpperCamelCase : List[Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : int = num_channels UpperCamelCase : int = image_size UpperCamelCase : List[Any] = min_resolution UpperCamelCase : int = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Optional[int] = size UpperCamelCase : List[str] = do_normalize UpperCamelCase : Optional[Any] = image_mean UpperCamelCase : Tuple = image_std def snake_case_ ( self ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = ViTImageProcessor if is_vision_available() else None def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = EfficientFormerImageProcessorTester(self ) @property def snake_case_ ( self ) -> List[Any]: return self.image_proc_tester.prepare_image_processor_dict() def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) ) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> int: # Initialize image_processor UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image ) # Test not batched input UpperCamelCase : str = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) def snake_case_ ( self ) -> str: # Initialize image_processor UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : 
Union[str, Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) def snake_case_ ( self ) -> Tuple: # Initialize image_processor UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : int = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) # Test not batched input UpperCamelCase : Optional[int] = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : int = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), )
103
0
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
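# A small check of the update rule (a sketch, assuming new_generation above
# is in scope): the blinker is a period-2 oscillator, so one step turns the
# vertical bar horizontal and a second step restores the starting state.
once = new_generation(BLINKER)
assert once == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(once) == BLINKER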
4
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): @slow def lowercase ( self : List[Any] ) -> List[Any]: lowercase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ) lowercase : Dict = AutoTokenizer.from_pretrained('google/mt5-small' ) lowercase : List[Any] = tokenizer('Hello there', return_tensors='tf' ).input_ids lowercase : Any = tokenizer('Hi I am', return_tensors='tf' ).input_ids lowercase : Dict = model(lowerCAmelCase, labels=lowerCAmelCase ).loss lowercase : Optional[int] = -tf.math.reduce_mean(lowerCAmelCase ).numpy() lowercase : Tuple = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
255
0
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
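# The file above is the import-time deprecation-shim pattern: subclass the
# relocated class under its old name and warn from the class body, so the
# warning fires once when the old module is imported. A minimal standalone
# sketch of the same pattern (Widget/_NewWidget are hypothetical names):
import warnings


class _NewWidget:  # stands in for the class at its new location
    pass


class Widget(_NewWidget):
    # The class body executes once, at import time of the shim module,
    # rather than on every instantiation.
    warnings.warn(
        "Importing `Widget` from its old location is deprecated; "
        "import it from its new location instead.",
        FutureWarning,
    )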
350
def euclidean_gcd(a: int, b: int) -> int:
    '''simple docstring'''
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    '''simple docstring'''
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
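# A quick property check (a sketch, assuming both GCD functions above are in
# scope): the iterative and recursive versions should agree with math.gcd on
# random non-negative pairs.
import math
import random

for _ in range(1_000):
    a, b = random.randint(0, 10_000), random.randint(1, 10_000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)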
94
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
82
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        """simple docstring"""
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
82
1
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __lowercase = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=18 , __lowercase=30 , __lowercase=400 , __lowercase=None , __lowercase=True , __lowercase=True , __lowercase=None , ) -> Optional[int]: __UpperCamelCase :Optional[Any] = size if size is not None else {'''height''': 20, '''width''': 20} __UpperCamelCase :List[Any] = parent __UpperCamelCase :Dict = batch_size __UpperCamelCase :Any = num_channels __UpperCamelCase :Dict = image_size __UpperCamelCase :Union[str, Any] = min_resolution __UpperCamelCase :Tuple = max_resolution __UpperCamelCase :Any = size __UpperCamelCase :Optional[int] = do_normalize __UpperCamelCase :Tuple = do_convert_rgb __UpperCamelCase :List[str] = [512, 1_024, 2_048, 4_096] __UpperCamelCase :str = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def UpperCamelCase__ ( self) -> Tuple: return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :Union[str, Any] = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' __UpperCamelCase :Optional[Any] = Image.open(requests.get(__lowercase , stream=__lowercase).raw).convert('''RGB''') return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' a__ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self) -> Dict: __UpperCamelCase :Union[str, Any] = PixaStructImageProcessingTester(self) @property def UpperCamelCase__ ( self) -> str: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self) -> Optional[Any]: __UpperCamelCase :Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowercase , '''do_normalize''')) self.assertTrue(hasattr(__lowercase , '''do_convert_rgb''')) def UpperCamelCase__ ( self) -> Dict: __UpperCamelCase :Union[str, Any] = self.image_processor_tester.prepare_dummy_image() __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict) __UpperCamelCase :int = 2_048 __UpperCamelCase :Tuple = image_processor(__lowercase , return_tensors='''pt''' , max_patches=__lowercase) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06) , atol=1E-3 , rtol=1E-3)) def UpperCamelCase__ ( self) -> List[Any]: # Initialize image_processor __UpperCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCamelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image) # Test not batched input 
__UpperCamelCase :Optional[int] = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __UpperCamelCase :Union[str, Any] = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __UpperCamelCase :Any = image_processor( __lowercase , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCamelCase__ ( self) -> Union[str, Any]: # Initialize image_processor __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCamelCase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image) # Test not batched input __UpperCamelCase :Any = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 __UpperCamelCase :str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowercase): __UpperCamelCase :Tuple = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches __UpperCamelCase :int = '''Hello''' __UpperCamelCase :str = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase , header_text=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __UpperCamelCase :List[Any] = image_processor( __lowercase , return_tensors='''pt''' , max_patches=__lowercase , header_text=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCamelCase__ ( self) -> List[str]: # Initialize image_processor __UpperCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCamelCase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray) __UpperCamelCase :Optional[Any] = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __UpperCamelCase :Dict = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __UpperCamelCase :str = image_processor( __lowercase , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def UpperCamelCase__ ( self) -> Optional[int]: # Initialize image_processor __UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCamelCase :Any = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor) # Test not batched input __UpperCamelCase :Union[str, Any] = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __UpperCamelCase :Tuple = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __UpperCamelCase :Optional[Any] = image_processor( __lowercase , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' a__ : Tuple = PixaStructImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self) -> Dict: __UpperCamelCase :List[Any] = PixaStructImageProcessingTester(self , num_channels=4) __UpperCamelCase :int = 3 @property def UpperCamelCase__ ( self) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowercase , '''do_normalize''')) self.assertTrue(hasattr(__lowercase , '''do_convert_rgb''')) def UpperCamelCase__ ( self) -> Union[str, Any]: # Initialize image_processor __UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCamelCase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image) # Test not batched input __UpperCamelCase :Union[str, Any] = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __UpperCamelCase :int = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __UpperCamelCase :Any = image_processor( __lowercase , return_tensors='''pt''' , max_patches=__lowercase).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
358
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self) -> int: __UpperCamelCase :str = 0 def UpperCamelCase__ ( self) -> Optional[Any]: __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''') self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Dict = Path(__lowercase) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :Union[str, Any] = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Union[str, Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :str = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Union[str, Any] = Path(__lowercase) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = CLIPConfig() # Create a dummy config file with image_proceesor_type __UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :Optional[Any] = Path(__lowercase) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained(__lowercase).to_dict() config_dict.pop('''image_processor_type''') __UpperCamelCase :List[str] = CLIPImageProcessor(**__lowercase) # save in new folder model_config.save_pretrained(__lowercase) config.save_pretrained(__lowercase) __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained(__lowercase) # make sure private variable is not incorrectly saved __UpperCamelCase :Union[str, Any] = json.loads(config.to_json_string()) self.assertTrue('''_processor_class''' not in dict_as_saved) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> List[str]: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :Tuple = Path(__lowercase) / '''preprocessor_config.json''' json.dump( 
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) __UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) def UpperCamelCase__ ( self) -> Optional[int]: with self.assertRaisesRegex( __lowercase , '''clip-base is not a local folder and is not a valid model identifier'''): __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''clip-base''') def UpperCamelCase__ ( self) -> List[Any]: with self.assertRaisesRegex( __lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): __UpperCamelCase :str = AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''') def UpperCamelCase__ ( self) -> List[str]: with self.assertRaisesRegex( __lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''') def UpperCamelCase__ ( self) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowercase): __UpperCamelCase :Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') # If remote code is disabled, we can't load this config. with self.assertRaises(__lowercase): __UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase) __UpperCamelCase :List[Any] = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''') def UpperCamelCase__ ( self) -> Optional[Any]: try: AutoConfig.register('''custom''' , __lowercase) AutoImageProcessor.register(__lowercase , __lowercase) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase): AutoImageProcessor.register(__lowercase , __lowercase) with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase :int = Path(__lowercase) / '''preprocessor_config.json''' __UpperCamelCase :List[str] = Path(__lowercase) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''')) __UpperCamelCase :int = CustomImageProcessor.from_pretrained(__lowercase) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase) __UpperCamelCase :int = AutoImageProcessor.from_pretrained(__lowercase) self.assertIsInstance(__lowercase , __lowercase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self) -> List[Any]: class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : List[str] = True try: AutoConfig.register('''custom''' , __lowercase) AutoImageProcessor.register(__lowercase , __lowercase) # If remote code is not set, the default is to use local __UpperCamelCase :str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. __UpperCamelCase :Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub __UpperCamelCase :List[str] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(not hasattr(__lowercase , '''is_local''')) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
105
0
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __snake_case : Dict = tuple[int, int] class A__ : '''simple docstring''' def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: set[int] , _SCREAMING_SNAKE_CASE: Mapping[EdgeT, int]) -> None: """simple docstring""" __lowerCAmelCase : set[int] = vertices __lowerCAmelCase : dict[EdgeT, int] = { (min(_SCREAMING_SNAKE_CASE), max(_SCREAMING_SNAKE_CASE)): weight for edge, weight in edges.items() } def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: EdgeT , _SCREAMING_SNAKE_CASE: int) -> None: """simple docstring""" self.vertices.add(edge[0]) self.vertices.add(edge[1]) __lowerCAmelCase : str = weight def _SCREAMING_SNAKE_CASE ( self: str) -> Graph: """simple docstring""" __lowerCAmelCase : Graph = Graph({min(self.vertices)} , {}) __lowerCAmelCase : EdgeT __lowerCAmelCase : int __lowerCAmelCase : EdgeT __lowerCAmelCase : int while len(subgraph.vertices) < len(self.vertices): __lowerCAmelCase : List[Any] = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __lowerCAmelCase : str = edge __lowerCAmelCase : List[Any] = weight subgraph.add_edge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) return subgraph def _lowercase ( __snake_case = "p107_network.txt" ) -> int: __lowerCAmelCase : str = os.path.abspath(os.path.dirname(__snake_case ) ) __lowerCAmelCase : str = os.path.join(__snake_case ,__snake_case ) __lowerCAmelCase : dict[EdgeT, int] = {} __lowerCAmelCase : list[str] __lowerCAmelCase : int __lowerCAmelCase : int with open(__snake_case ) as f: __lowerCAmelCase : List[str] = f.read().strip().split("\n" ) __lowerCAmelCase : Optional[int] = [line.split("," ) for line in data] for edgea in range(1 ,len(__snake_case ) ): for edgea in range(__snake_case ): if adjaceny_matrix[edgea][edgea] != "-": __lowerCAmelCase : Dict = int(adjaceny_matrix[edgea][edgea] ) __lowerCAmelCase : Graph = Graph(set(range(len(__snake_case ) ) ) ,__snake_case ) __lowerCAmelCase : Graph = graph.prims_algorithm() __lowerCAmelCase : int = sum(graph.edges.values() ) __lowerCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"""{solution() = }""")
269
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class A__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : List[str] = parent __lowerCAmelCase : Optional[Any] = batch_size __lowerCAmelCase : Union[str, Any] = seq_length __lowerCAmelCase : Optional[Any] = is_training __lowerCAmelCase : Optional[int] = use_input_mask __lowerCAmelCase : Dict = use_token_type_ids __lowerCAmelCase : Dict = use_labels __lowerCAmelCase : Dict = vocab_size __lowerCAmelCase : Tuple = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : Tuple = intermediate_size __lowerCAmelCase : List[Any] = hidden_act __lowerCAmelCase : Optional[Any] = hidden_dropout_prob __lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : Union[str, Any] = type_vocab_size __lowerCAmelCase : Optional[int] = type_sequence_label_size __lowerCAmelCase : Dict = initializer_range __lowerCAmelCase : Tuple = num_labels __lowerCAmelCase : Optional[Any] = num_choices __lowerCAmelCase : Union[str, Any] = scope __lowerCAmelCase : Optional[Any] = q_groups __lowerCAmelCase : Optional[int] = k_groups __lowerCAmelCase : Any = v_groups __lowerCAmelCase : int = post_attention_groups __lowerCAmelCase : List[str] = intermediate_groups __lowerCAmelCase : Optional[Any] = output_groups def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]: """simple docstring""" __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __lowerCAmelCase : Union[str, Any] = None if self.use_input_mask: __lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length]) __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : List[Any] = None 
__lowerCAmelCase : str = None if self.use_labels: __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices) __lowerCAmelCase : Any = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int: """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple: """simple docstring""" __lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict: """simple docstring""" __lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int: """simple docstring""" __lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : Union[str, Any] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]: 
"""simple docstring""" __lowerCAmelCase : List[Any] = self.num_labels __lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Dict = self.num_labels __lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple: """simple docstring""" __lowerCAmelCase : List[str] = self.num_choices __lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE) model.to(_SCREAMING_SNAKE_CASE) model.eval() __lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __lowerCAmelCase : str = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs __lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) SCREAMING_SNAKE_CASE = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Any = SqueezeBertModelTester(self) __lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37) def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple: """simple docstring""" 
self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any: """simple docstring""" __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int: """simple docstring""" __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Any) -> int: """simple docstring""" __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str: """simple docstring""" __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE) @slow def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict: """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE) self.assertIsNotNone(_SCREAMING_SNAKE_CASE) @require_sentencepiece @require_tokenizers @require_torch class A__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli") __lowerCAmelCase : List[Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]]) __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE)[0] __lowerCAmelCase : Any = torch.Size((1, 3)) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE) __lowerCAmelCase : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]]) self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4))
269
1
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [False] * len(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = [-1] * len(UpperCamelCase_ ) def dfs(UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = c for u in graph[v]: if not visited[u]: dfs(UpperCamelCase_ , 1 - c ) for i in range(len(UpperCamelCase_ ) ): if not visited[i]: dfs(UpperCamelCase_ , 0 ) for i in range(len(UpperCamelCase_ ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph __magic_name__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
255
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""") __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""") __SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""tf""").input_ids __SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""tf""").input_ids __SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , labels=lowerCAmelCase__).loss __SCREAMING_SNAKE_CASE = -tf.math.reduce_mean(lowerCAmelCase__).numpy() __SCREAMING_SNAKE_CASE = -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
255
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
141
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Direct evaluation: sum of c_i * x**i over all coefficients.
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's scheme: fold from the highest coefficient down.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
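# Sanity check for the two evaluators above -- a minimal sketch; the values are
# our own example, not from the source. Horner's rule costs one multiply and one
# add per coefficient, avoiding the repeated exponentiation in evaluate_poly.
import math

check_poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 5x^2 + 9.3x^3 + 7x^4
assert math.isclose(evaluate_poly(check_poly, 10.0), 79800.0)
assert math.isclose(horner(check_poly, 10.0), 79800.0)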
141
1
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
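# The equality test above compares floats exactly, which can misclassify large
# cubes once n ** (1 / 3) rounds. A sketch of an integer-exact variant; the name
# perfect_cube_int is ours, not from the source:
def perfect_cube_int(n: int) -> bool:
    if n < 0:
        return False
    root = round(n ** (1 / 3))
    # Verify in integer arithmetic, checking neighbours to absorb rounding drift.
    return any((root + d) ** 3 == n for d in (-1, 0, 1))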
368
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class __magic_name__ ( unittest.TestCase ): def __magic_name__ ( self ) -> int: '''simple docstring''' if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=__snake_case , ) assert hasattr(self , 'env' ) def __magic_name__ ( self , __snake_case ) -> int: '''simple docstring''' __a =f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings __a ={'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__snake_case , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version='py36' , ) def __magic_name__ ( self , __snake_case ) -> Optional[Any]: '''simple docstring''' TrainingJobAnalytics(__snake_case ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def __magic_name__ ( self , __snake_case ) -> Optional[int]: '''simple docstring''' # create estimator __a =self.create_estimator(__snake_case ) # run training estimator.fit() # result dataframe __a =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) __a =list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __a =( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile: json.dump({'train_time': 
train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
308
0
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _a ( a :Features ) -> Optional[int]: a = np.inf def set_batch_size(a :FeatureType ) -> None: nonlocal batch_size if isinstance(a , a ): a = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(a , a ): a = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(a , a ) and feature.dtype == "binary": a = min(a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(a , a ) return None if batch_size is np.inf else batch_size class lowercase_ ( lowercase ): '''simple docstring''' def __init__( self : str , __UpperCAmelCase : NestedDataStructureLike[PathLike] , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : List[Any] , ) ->List[Any]: """simple docstring""" super().__init__( __UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , ) a = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths} a = _PACKAGED_DATASETS_MODULES['''parquet'''][1] a = Parquet( cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , hash=__UpperCAmelCase , **__UpperCAmelCase , ) def __lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" if self.streaming: a = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: a = None a = None a = None a = None self.builder.download_and_prepare( download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , ) a = self.builder.as_dataset( split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class lowercase_ : '''simple docstring''' def __init__( self : Union[str, Any] , __UpperCAmelCase : Dataset , __UpperCAmelCase : Union[PathLike, BinaryIO] , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : List[str] , ) ->Any: """simple docstring""" a = dataset a = path_or_buf a = batch_size or get_writer_batch_size(dataset.features ) a = parquet_writer_kwargs def __lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" a = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: a = self._write(file_obj=__UpperCAmelCase , batch_size=__UpperCAmelCase , **self.parquet_writer_kwargs ) else: a = self._write(file_obj=self.path_or_buf , batch_size=__UpperCAmelCase , **self.parquet_writer_kwargs ) return written def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : BinaryIO , __UpperCAmelCase : int , **__UpperCAmelCase : List[str] ) ->int: """simple docstring""" a = 
0 a = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCAmelCase ) a = self.dataset.features.arrow_schema a = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase , **__UpperCAmelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCAmelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): a = query_table( table=self.dataset._data , key=slice(__UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCAmelCase ) written += batch.nbytes writer.close() return written
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
298
0
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]

    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def main() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
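# Usage sketch for the two Prim variants above, assuming the de-mangled names
# Vertex/connect/prim/prim_heap; the triangle instance is our own illustration,
# not from the source.
mst_graph = [Vertex(i) for i in range(1, 4)]
connect(mst_graph, 1, 2, 1)  # edge 1-2 with weight 1
connect(mst_graph, 2, 3, 2)  # edge 2-3 with weight 2
connect(mst_graph, 1, 3, 3)  # edge 1-3 with weight 3

print(prim(mst_graph, mst_graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(mst_graph, mst_graph[0])))  # same MST edges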
143
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: UpperCamelCase__ = None UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = '▁' UpperCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCamelCase__ = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}, 'tokenizer_file': { 'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json' }, } UpperCamelCase__ = { 'google/pegasus-xsum': 5_1_2, } class A ( UpperCAmelCase_ ): __UpperCAmelCase : str = VOCAB_FILES_NAMES __UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Union[str, Any] = PegasusTokenizer __UpperCAmelCase : Any = ['input_ids', 'attention_mask'] def __init__(self : Optional[int] , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Union[str, Any]="<pad>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : int="<mask_2>" , __UpperCAmelCase : Optional[Any]="<mask_1>" , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=1_0_3 , **__UpperCAmelCase : str , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = offset if additional_special_tokens is not None: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError( f"""additional_special_tokens should be of type {type(__UpperCAmelCase )}, but is""" f""" {type(__UpperCAmelCase )}""" ) UpperCAmelCase__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(__UpperCAmelCase ) , self.offset - 1 ) ] if len(set(__UpperCAmelCase ) ) != len(__UpperCAmelCase ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) UpperCAmelCase__ = additional_special_tokens_extended else: UpperCAmelCase__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , pad_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , mask_token_sent=__UpperCAmelCase , offset=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) UpperCAmelCase__ = vocab_file UpperCAmelCase__ = False if not self.vocab_file else True def lowercase_ (self : List[Any] , __UpperCAmelCase : Tuple ) -> int: """simple docstring""" UpperCAmelCase__ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" ) return [1 if x in all_special_ids else 0 for x in seq] def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : List , __UpperCAmelCase : Optional[List] = None , __UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(__UpperCAmelCase ) elif token_ids_a is None: return self._special_token_mask(__UpperCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowercase_ (self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowercase_ (self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
143
1
"""simple docstring""" import math def _snake_case ( ): lowerCAmelCase : Union[str, Any] = input('''Enter message: ''' ) lowerCAmelCase : Optional[int] = int(input(f'''Enter key [2-{len(_snake_case ) - 1}]: ''' ) ) lowerCAmelCase : str = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): lowerCAmelCase : Any = encrypt_message(_snake_case , _snake_case ) elif mode.lower().startswith('''d''' ): lowerCAmelCase : Union[str, Any] = decrypt_message(_snake_case , _snake_case ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'''Output:\n{text + "|"}''' ) def _snake_case ( _snake_case : int , _snake_case : str ): lowerCAmelCase : Optional[Any] = [''''''] * key for col in range(_snake_case ): lowerCAmelCase : Optional[Any] = col while pointer < len(_snake_case ): cipher_text[col] += message[pointer] pointer += key return "".join(_snake_case ) def _snake_case ( _snake_case : int , _snake_case : str ): lowerCAmelCase : Union[str, Any] = math.ceil(len(_snake_case ) / key ) lowerCAmelCase : str = key lowerCAmelCase : Any = (num_cols * num_rows) - len(_snake_case ) lowerCAmelCase : Dict = [''''''] * num_cols lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): lowerCAmelCase : int = 0 row += 1 return "".join(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
60
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_: def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=3 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : int=[1, 2, 1] , UpperCamelCase_ : Optional[int]=[2, 2, 4] , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2.0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Tuple=1E-5 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Dict=8 , ): lowerCAmelCase : Union[str, Any] = parent lowerCAmelCase : int = batch_size lowerCAmelCase : List[str] = image_size lowerCAmelCase : Union[str, Any] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Any = embed_dim lowerCAmelCase : Any = depths lowerCAmelCase : Any = num_heads lowerCAmelCase : int = window_size lowerCAmelCase : List[Any] = mlp_ratio lowerCAmelCase : int = qkv_bias lowerCAmelCase : Optional[Any] = hidden_dropout_prob lowerCAmelCase : str = attention_probs_dropout_prob lowerCAmelCase : str = drop_path_rate lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : int = use_absolute_embeddings lowerCAmelCase : Union[str, Any] = patch_norm lowerCAmelCase : int = layer_norm_eps lowerCAmelCase : str = initializer_range lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = scope lowerCAmelCase : List[str] = use_labels lowerCAmelCase : str = type_sequence_label_size lowerCAmelCase : Union[str, Any] = encoder_stride def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : Union[str, Any] = None if self.use_labels: lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Tuple = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : List[Any] ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ): lowerCAmelCase : List[str] = SwinvaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ ) lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = SwinvaForMaskedImageModeling(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase : List[Any] = 1 lowerCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : int = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = self.type_sequence_label_size lowerCAmelCase : Optional[Any] = SwinvaForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = config_and_inputs lowerCAmelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __UpperCamelCase = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = SwinvaModelTester(self ) lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=3_7 ) def lowerCamelCase__ ( self : Optional[int] ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*UpperCamelCase_ ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def lowerCamelCase__ ( self : Dict ): pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def lowerCamelCase__ ( self : int ): pass def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Dict = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) lowerCAmelCase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Optional[int] = [*signature.parameters.keys()] lowerCAmelCase : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: lowerCAmelCase : Any = True lowerCAmelCase : List[str] = False lowerCAmelCase : int = True lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : str = outputs.attentions lowerCAmelCase : int = len(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase : Any = True lowerCAmelCase : Union[str, Any] = config.window_size**2 lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) lowerCAmelCase : str = len(UpperCamelCase_ ) # Check attention is always last and order is fine lowerCAmelCase : Optional[int] = True lowerCAmelCase : int = True lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): lowerCAmelCase : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase : Union[str, Any] = 2 self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) ) lowerCAmelCase : List[str] = outputs.attentions self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCamelCase__ ( self : int , 
UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : str = outputs.hidden_states lowerCAmelCase : List[str] = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # Swinv2 has a different seq_length lowerCAmelCase : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCAmelCase : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = reshaped_hidden_states[0].shape lowerCAmelCase : Optional[Any] = ( reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Tuple = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Dict = 3 lowerCAmelCase : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase : str = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase : Optional[int] = True self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ ) def lowerCamelCase__ ( 
self : str ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : int ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : int = SwinvaModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Union[str, Any] = _config_zero_init(UpperCamelCase_ ) for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(config=UpperCamelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : Dict ): return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.default_image_processor lowerCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : Dict = model(**UpperCamelCase_ ) # verify the logits lowerCAmelCase : List[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCAmelCase : Any = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
60
1
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise the PyTorch model from the JSON config.
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
231
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
231
1
def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
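# Cross-check of the recurrence against the closed form C(n) = C(2n, n)/(n + 1);
# a minimal sketch (catalan_closed_form is our own helper, not from the source).
from math import comb


def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)  # exact in integer arithmetic


assert [catalan_closed_form(i) for i in range(8)] == catalan_numbers(7)
# Both yield [1, 1, 2, 5, 14, 42, 132, 429].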
23
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : List[str] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = emb.weight.shape _UpperCAmelCase : str = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = emb.weight.data return lin_layer def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=None ): _UpperCAmelCase : int = {} for old_key in state_dict.keys(): _UpperCAmelCase : Tuple = old_key if "moe_layer.experts." in key: if expert_idx is not None: _UpperCAmelCase : Optional[int] = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" ) else: _UpperCAmelCase : Any = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: _UpperCAmelCase : List[Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: _UpperCAmelCase : Tuple = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: _UpperCAmelCase : List[Any] = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: _UpperCAmelCase : List[Any] = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: _UpperCAmelCase : Any = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: _UpperCAmelCase : int = key.replace("final_layer_norm" , "ff_layer_norm" ) _UpperCAmelCase : Tuple = state_dict[old_key] return new_dict def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = WEIGHTS_NAME ): _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : Optional[Any] = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) for expert in range(__lowerCAmelCase ): _UpperCAmelCase : Tuple = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(__lowerCAmelCase ): _UpperCAmelCase : Tuple = torch.load(__lowerCAmelCase )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Dict = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[str] = os.path.join( __lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowerCAmelCase )[0]].dtype ) # Add the last block _UpperCAmelCase : Tuple = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) _UpperCAmelCase : Union[str, Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Any = shared_weights["decoder.embed_tokens.weight"] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__lowerCAmelCase ) == 1: _UpperCAmelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowerCAmelCase , __lowerCAmelCase ) # Otherwise, let's build the index _UpperCAmelCase : Union[str, Any] = {} for idx, shard in enumerate(__lowerCAmelCase ): _UpperCAmelCase : Tuple = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin""" ) _UpperCAmelCase : List[Any] = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) for key in shard: _UpperCAmelCase : List[Any] = shard_file # Add the metadata _UpperCAmelCase : Any = {"total_size": total_size} _UpperCAmelCase : List[str] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" , encoding="utf-8" ) as f: _UpperCAmelCase : Tuple = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n" f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ ,lowerCamelCase__ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) lowerCamelCase__ = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) lowerCamelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
234
0
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
78
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
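# The DP above fills O(n^2) cells with an O(n) split search each, i.e. O(n^3)
# time. For the CLRS dimensions used in main(), the sketch below (our own
# check, not from the source) confirms the classic optimum of 15125 multiplies.
chain_matrix, chain_sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert chain_matrix[1][6] == 15125  # (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ))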
78
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
23
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__: Optional[int] = logging.get_logger(__name__) def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Optional[int]: UpperCAmelCase : Tuple = DPTConfig(embedding_type='''hybrid''' ) if "large" in checkpoint_url: UpperCAmelCase : Tuple = 1024 UpperCAmelCase : List[Any] = 4096 UpperCAmelCase : str = 24 UpperCAmelCase : List[Any] = 16 UpperCAmelCase : str = [5, 11, 17, 23] UpperCAmelCase : List[Any] = [256, 512, 1024, 1024] UpperCAmelCase : Tuple = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: UpperCAmelCase : Optional[Any] = 768 UpperCAmelCase : Tuple = [1, 1, 1, 0.5] UpperCAmelCase : int = [256, 512, 768, 768] UpperCAmelCase : Any = 150 UpperCAmelCase : Tuple = 16 UpperCAmelCase : Any = (1, 384, 384) UpperCAmelCase : Optional[Any] = False UpperCAmelCase : Tuple = '''project''' if "ade" in checkpoint_url: UpperCAmelCase : Any = True UpperCAmelCase : str = 768 UpperCAmelCase : Optional[int] = [1, 1, 1, 0.5] UpperCAmelCase : List[Any] = 150 UpperCAmelCase : List[Any] = 16 UpperCAmelCase : str = '''huggingface/label-files''' UpperCAmelCase : Tuple = '''ade20k-id2label.json''' UpperCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) UpperCAmelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} UpperCAmelCase : List[Any] = idalabel UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()} UpperCAmelCase : Union[str, Any] = [1, 150, 480, 480] return config, expected_shape def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> int: UpperCAmelCase : List[str] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Tuple ) -> Any: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCAmelCase : Tuple = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: UpperCAmelCase : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: UpperCAmelCase : int = name.replace('''patch_embed''' , '''''' ) if "pos_embed" in name: UpperCAmelCase : Tuple = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: UpperCAmelCase : Any = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: UpperCAmelCase : str = name.replace('''proj''' , '''projection''' ) if "blocks" in name: UpperCAmelCase : Any = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: UpperCAmelCase : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "norm1" in name and "backbone" not in name: UpperCAmelCase : Dict = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name and "backbone" not in name: UpperCAmelCase : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: UpperCAmelCase : Tuple = 
name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: UpperCAmelCase : str = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: UpperCAmelCase : Dict = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: UpperCAmelCase : int = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: UpperCAmelCase : Tuple = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: UpperCAmelCase : int = name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: UpperCAmelCase : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCAmelCase : str = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: UpperCAmelCase : List[str] = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: UpperCAmelCase : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if "resConfUnit2" in name: UpperCAmelCase : Any = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: UpperCAmelCase : Optional[int] = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: UpperCAmelCase : Tuple = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCAmelCase : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCAmelCase : int = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCAmelCase : Optional[Any] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCAmelCase : List[Any] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: UpperCAmelCase : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: UpperCAmelCase : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: UpperCAmelCase : List[str] = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: UpperCAmelCase : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: UpperCAmelCase : int = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: UpperCAmelCase : Optional[int] = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: UpperCAmelCase : Dict = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: UpperCAmelCase 
: Any = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: UpperCAmelCase : Optional[int] = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: UpperCAmelCase : Union[str, Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) if "backbone" in name: UpperCAmelCase : List[Any] = name.replace('''backbone''' , '''backbone.bit.encoder''' ) if ".." in name: UpperCAmelCase : Optional[int] = name.replace('''..''' , '''.''' ) if "stem.conv" in name: UpperCAmelCase : Optional[Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: UpperCAmelCase : Optional[int] = name.replace('''blocks''' , '''layers''' ) if "convolution" in name and "backbone" in name: UpperCAmelCase : List[Any] = name.replace('''convolution''' , '''conv''' ) if "layer" in name and "backbone" in name: UpperCAmelCase : List[str] = name.replace('''layer''' , '''layers''' ) if "backbone.bit.encoder.bit" in name: UpperCAmelCase : List[Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' ) if "embedder.conv" in name: UpperCAmelCase : List[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' ) if "backbone.bit.encoder.stem.norm" in name: UpperCAmelCase : Tuple = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' ) return name def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Optional[Any]: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) UpperCAmelCase : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : Tuple = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : int = in_proj_bias[: config.hidden_size] UpperCAmelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase : Union[str, Any] = in_proj_bias[-config.hidden_size :] def snake_case_ ( ) -> List[str]: UpperCAmelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ) -> Any: UpperCAmelCase , UpperCAmelCase : int = get_dpt_config(_lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") UpperCAmelCase : List[Any] = torch.load(_lowerCAmelCase , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(_lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): UpperCAmelCase : Any = state_dict.pop(_lowerCAmelCase ) UpperCAmelCase : List[Any] = val # read in qkv matrices read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model UpperCAmelCase : Optional[Any] = DPTForSemanticSegmentation(_lowerCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() # Check outputs on an image UpperCAmelCase : 
int = 480 if '''ade''' in checkpoint_url else 384 UpperCAmelCase : List[Any] = DPTImageProcessor(size=_lowerCAmelCase ) UpperCAmelCase : Dict = prepare_img() UpperCAmelCase : Optional[int] = image_processor(_lowerCAmelCase , return_tensors='''pt''' ) # forward pass UpperCAmelCase : Any = model(**_lowerCAmelCase ).logits if '''ade''' in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth if show_prediction: UpperCAmelCase : Dict = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=_lowerCAmelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: model.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) if __name__ == "__main__": UpperCamelCase__: Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) parser.add_argument( "--model_name", default="dpt-large", type=str, help="Name of the model, in case you're pushing to the hub.", ) parser.add_argument( "--show_prediction", action="store_true", ) UpperCamelCase__: Tuple = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
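One subtle step in the renaming above is the refinenet index reversal flagged by the inline comment. A standalone sketch of just that arithmetic follows; the helper name is illustrative and not part of the conversion script:

def flip_refinenet_index(layer_idx: int) -> int:
    # The checkpoint numbers refinenet blocks 1..4, while the target model
    # indexes fusion_stage.layers 0..3 in the opposite order, so the map is
    # 4 -> 0, 3 -> 1, 2 -> 2 and 1 -> 3, exactly as the inline comment says.
    return abs(layer_idx - 4)


assert [flip_refinenet_index(i) for i in (1, 2, 3, 4)] == [3, 2, 1, 0]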
23
1
'''simple docstring'''
import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs) -> None:
    '''solves the multi-process interleaved print problem'''
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f'[{hostname}-{local_rank}]'

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f'{gpu} is OK (global rank: {rank}/{world_size})')
    dist.barrier()
    if rank == 0:
        printflock(f'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
    printflock(f'{gpu} is broken')
    raise
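The script above is a per-GPU health check: each rank initializes NCCL, runs an all_reduce, allocates CUDA memory, and reports through the flock-guarded print so output from concurrent ranks does not interleave. A minimal single-process launch sketch follows; the saved file name and the rendezvous variable values are assumptions for illustration only, since the script is normally started via a launcher such as torchrun:

import os
import subprocess
import sys

# LOCAL_RANK/RANK/WORLD_SIZE/MASTER_* are normally injected by the launcher,
# e.g.: torchrun --nproc_per_node=2 torch_distributed_gpu_test.py
env = dict(
    os.environ,
    LOCAL_RANK='0',
    RANK='0',
    WORLD_SIZE='1',
    MASTER_ADDR='127.0.0.1',
    MASTER_PORT='29500',
)
subprocess.run([sys.executable, 'torch_distributed_gpu_test.py'], env=env, check=True)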
367
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat_a: float, lon_a: float, lat_b: float, lon_b: float) -> float:
    '''simple docstring'''
    # Correct the geodetic latitudes to auxiliary (parametric) latitudes on
    # the WGS84 ellipsoid before applying the haversine formula.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat_a)))
    phi_b = atan((1 - flattening) * tan(radians(lat_b)))
    lambda_a = radians(lon_a)
    lambda_b = radians(lon_b)
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2)
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a) * cos(phi_b) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
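A quick usage sketch of haversine_distance above; the coordinates are approximate and purely illustrative:

san_francisco = (37.774856, -122.424227)
new_york = (40.713019, -74.012647)
# Prints the ellipsoid-corrected great-circle distance in metres
# (roughly 4.1 million metres for this pair).
print(f"{haversine_distance(*san_francisco, *new_york):,.0f} m")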
72
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : str = logging.get_logger(__name__) lowercase__ : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = 'deformable_detr' _snake_case : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = backbone_config.get('''model_type''' ) _UpperCamelCase = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase = config_class.from_dict(lowerCAmelCase__ ) _UpperCamelCase = use_timm_backbone _UpperCamelCase = backbone_config _UpperCamelCase = num_channels _UpperCamelCase = num_queries _UpperCamelCase = max_position_embeddings _UpperCamelCase = d_model _UpperCamelCase = encoder_ffn_dim _UpperCamelCase = encoder_layers _UpperCamelCase = encoder_attention_heads _UpperCamelCase = decoder_ffn_dim _UpperCamelCase = decoder_layers _UpperCamelCase = decoder_attention_heads _UpperCamelCase = dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = activation_function _UpperCamelCase = init_std _UpperCamelCase = init_xavier_std _UpperCamelCase = encoder_layerdrop _UpperCamelCase = auxiliary_loss _UpperCamelCase = position_embedding_type _UpperCamelCase = backbone _UpperCamelCase = use_pretrained_backbone _UpperCamelCase = dilation # deformable attributes _UpperCamelCase = num_feature_levels _UpperCamelCase = encoder_n_points _UpperCamelCase = decoder_n_points _UpperCamelCase = two_stage _UpperCamelCase = two_stage_num_proposals _UpperCamelCase = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _UpperCamelCase = class_cost _UpperCamelCase = bbox_cost _UpperCamelCase = giou_cost # Loss coefficients _UpperCamelCase = mask_loss_coefficient _UpperCamelCase = dice_loss_coefficient _UpperCamelCase = bbox_loss_coefficient _UpperCamelCase = giou_loss_coefficient _UpperCamelCase = eos_coefficient _UpperCamelCase = focal_alpha _UpperCamelCase = disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def snake_case__ ( self : int ) -> int: '''simple docstring''' return self.d_model def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
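As a hedged usage sketch, the class above mirrors transformers' DeformableDetrConfig, whose attribute_map routes the generic config names onto the model-specific fields shown at the top of the class. The values below are illustrative defaults, not tuned settings:

from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=300, two_stage=False)
print(config.hidden_size)          # routed to d_model by attribute_map
print(config.num_attention_heads)  # routed to encoder_attention_heads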
324
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : jnp.ndarray @flax_register_to_config class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : int = 3_2 _snake_case : int = 4 _snake_case : int = 4 _snake_case : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _snake_case : Union[bool, Tuple[bool]] = False _snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _snake_case : int = 2 _snake_case : Union[int, Tuple[int]] = 8 _snake_case : Optional[Union[int, Tuple[int]]] = None _snake_case : int = 1_2_8_0 _snake_case : float = 0.0 _snake_case : bool = False _snake_case : jnp.dtype = jnp.floataa _snake_case : bool = True _snake_case : int = 0 _snake_case : bool = False def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa ) _UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ ) _UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"] def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.block_out_channels _UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype ) _UpperCamelCase = self.only_cross_attention if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase = [] _UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = block_out_channels[i] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = down_blocks # mid _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCamelCase = [] _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = reversed_block_out_channels[i] _UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = output_channel _UpperCamelCase = up_blocks # out _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCamelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , jnp.ndarray ): _UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 ) _UpperCamelCase = self.time_proj(lowerCAmelCase__ ) _UpperCamelCase = self.time_embedding(lowerCAmelCase__ ) # 2. pre-process _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) _UpperCamelCase = self.conv_in(lowerCAmelCase__ ) # 3. down _UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( lowerCAmelCase__ , lowerCAmelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase = new_down_block_res_samples # 4. mid _UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = up_block( lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , ) else: _UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train ) # 6. post-process _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ ) _UpperCamelCase = nn.silu(lowerCAmelCase__ ) _UpperCamelCase = self.conv_out(lowerCAmelCase__ ) _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
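The first few lines of __call__ above normalize the timestep argument so that a plain Python int, a 0-d array, or a 1-d batch all reach the sinusoidal projection as a 1-d array. A standalone sketch of that promotion, with assumed dtypes:

import jax.numpy as jnp


def normalize_timesteps(timesteps):
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)  # Python scalar -> shape (1,)
    elif timesteps.ndim == 0:
        timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)  # 0-d array -> shape (1,)
    return timesteps


print(normalize_timesteps(10).shape)             # (1,)
print(normalize_timesteps(jnp.array(10)).shape)  # (1,)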
324
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase :str = logging.get_logger(__name__) class _lowerCamelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' A_ : int = ["pixel_values"] def __init__( self : str , _A : Union[str, Any] = True , _A : Optional[int] = None , _A : Dict = 0.9 , _A : Optional[Any] = PILImageResampling.BICUBIC , _A : Tuple = True , _A : Any = None , _A : int = 1 / 255 , _A : Optional[int] = True , _A : Union[str, Any] = True , _A : str = None , _A : Union[str, Any] = None , **_A : int , ) -> None: super().__init__(**__UpperCAmelCase ) __magic_name__ : List[str] = size if size is not None else {"""shortest_edge""": 224} __magic_name__ : Any = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __magic_name__ : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ : Union[str, Any] = get_size_dict(__UpperCAmelCase , param_name='crop_size' ) __magic_name__ : Dict = do_resize __magic_name__ : int = size __magic_name__ : Optional[int] = crop_pct __magic_name__ : int = resample __magic_name__ : int = do_center_crop __magic_name__ : int = crop_size __magic_name__ : List[Any] = do_rescale __magic_name__ : int = rescale_factor __magic_name__ : Dict = do_normalize __magic_name__ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __magic_name__ : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Union[str, Any] , _A : Tuple = None , _A : Any = PILImageResampling.BICUBIC , _A : int = None , **_A : Tuple , ) -> np.ndarray: __magic_name__ : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: __magic_name__ : Tuple = int(size['shortest_edge'] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __magic_name__ : List[Any] = int(size['height'] / crop_pct ) else: __magic_name__ : str = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct )) else: raise ValueError('Invalid size for resize: {}'.format(__UpperCAmelCase ) ) __magic_name__ : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase ) else: if "shortest_edge" in size: __magic_name__ : Tuple = get_resize_output_image_size(__UpperCAmelCase , size=size['shortest_edge'] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: __magic_name__ : int = (size["""height"""], size["""width"""]) else: raise ValueError('Invalid size for resize: {}'.format(__UpperCAmelCase ) ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __lowerCAmelCase ( self : List[str] , _A : Optional[int] , _A : Dict , _A : Dict = None , **_A : List[str] , ) -> np.ndarray: __magic_name__ : Union[str, Any] = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __lowerCAmelCase ( self : Optional[int] , _A : Optional[int] , _A : int , _A : Optional[int] = None , **_A : Optional[int] , ) -> Union[str, Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __lowerCAmelCase ( self : int , _A : List[Any] , _A : str , _A : List[str] , _A : List[Any] = None , **_A : Optional[Any] , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __lowerCAmelCase ( self : Any , _A : Optional[Any] , _A : List[Any] = None , _A : List[str] = None , _A : Union[str, Any] = None , _A : Optional[Any] = None , _A : Dict = None , _A : Any = None , _A : Optional[int] = None , _A : Optional[int] = None , _A : Any = None , _A : Tuple = None , _A : List[str] = None , _A : Tuple = None , _A : str = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image: __magic_name__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __magic_name__ : int = crop_pct if crop_pct is not None else self.crop_pct __magic_name__ : Tuple = resample if resample is not None else self.resample __magic_name__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : List[Any] = image_mean if image_mean is not None else self.image_mean __magic_name__ : Dict = image_std if image_std is not None else self.image_std __magic_name__ : Any = size if size is not None else self.size __magic_name__ : Any = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __magic_name__ : int = crop_size if crop_size is not None else self.crop_size 
__magic_name__ : Optional[Any] = get_size_dict(__UpperCAmelCase , param_name='crop_size' ) __magic_name__ : List[Any] = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_pct is None: raise ValueError('Crop_pct must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. __magic_name__ : Union[str, Any] = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __magic_name__ : int = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __magic_name__ : Union[str, Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __magic_name__ : List[str] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __magic_name__ : Optional[int] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __magic_name__ : Union[str, Any] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __magic_name__ : Dict = {"""pixel_values""": images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
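The crop_pct branch in the resize method above enlarges the resize target so that the subsequent center crop retains roughly crop_pct of the resized image. The arithmetic in isolation, with illustrative numbers:

def scaled_resize_edge(shortest_edge: int, crop_pct: float) -> int:
    # Resize the short edge to size / crop_pct, then center-crop back to size.
    return int(shortest_edge / crop_pct)


assert scaled_resize_edge(224, 0.9) == 248  # resize to 248, crop to 224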
366
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase :Tuple = logging.get_logger(__name__) class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : int = ["""pixel_values"""] def __init__( self : Any , _A : bool = True , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[Any] , ) -> None: super().__init__(**_A ) __magic_name__ : List[str] = size if size is not None else {'shortest_edge': 256} __magic_name__ : str = get_size_dict(_A , default_to_square=_A ) __magic_name__ : List[str] = crop_size if crop_size is not None else {'height': 224, 'width': 224} __magic_name__ : Optional[int] = get_size_dict(_A ) __magic_name__ : Union[str, Any] = do_resize __magic_name__ : List[Any] = size __magic_name__ : List[str] = resample __magic_name__ : Dict = do_center_crop __magic_name__ : List[str] = crop_size __magic_name__ : int = do_rescale __magic_name__ : Tuple = rescale_factor __magic_name__ : List[str] = do_normalize __magic_name__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: __magic_name__ : Optional[Any] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) __magic_name__ : Dict = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ) -> np.ndarray: __magic_name__ : int = get_size_dict(_A ) return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple ) -> np.ndarray: return rescale(_A , scale=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray: return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def __lowerCAmelCase ( self : List[str] , _A : ImageInput , _A : Optional[bool] = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : List[Any] , ) -> List[str]: __magic_name__ : int = do_resize if do_resize is not None else self.do_resize __magic_name__ : Tuple = size if size is not None else self.size __magic_name__ : Optional[Any] = get_size_dict(_A , default_to_square=_A ) __magic_name__ : Dict = resample if resample is not None else self.resample __magic_name__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ : Dict = crop_size if crop_size is not None else self.crop_size __magic_name__ : List[str] = get_size_dict(_A ) __magic_name__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ : Any = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ : Tuple = image_mean if image_mean is not None else self.image_mean __magic_name__ : Union[str, Any] = image_std if image_std is not None else self.image_std __magic_name__ : int = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__magic_name__ : List[Any] = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ : Union[str, Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: __magic_name__ : Union[str, Any] = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: __magic_name__ : List[Any] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ : Optional[Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ : Union[str, Any] = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ : List[str] = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
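The preprocess pipeline above applies rescale and normalize back to back; in isolation the two steps amount to the following sketch, where the mean and std values repeat the IMAGENET_STANDARD constants imported at the top so the snippet is self-contained:

import numpy as np

image = np.random.randint(0, 256, (3, 224, 224)).astype(np.float32)
image = image * (1 / 255)  # rescale: uint8 range -> [0, 1]
mean = np.array([0.5, 0.5, 0.5])[:, None, None]
std = np.array([0.5, 0.5, 0.5])[:, None, None]
image = (image - mean) / std  # normalize: [0, 1] -> [-1, 1]
assert image.min() >= -1.0 and image.max() <= 1.0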
275
0
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _UpperCAmelCase : def __init__( self :Optional[int] , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any]=13 , __UpperCamelCase :int=64 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :str=3 , __UpperCamelCase :Any=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Dict=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :str=4 , __UpperCamelCase :str=37 , __UpperCamelCase :Optional[int]="gelu" , __UpperCamelCase :List[str]=0.1 , __UpperCamelCase :Any=0.1 , __UpperCamelCase :Tuple=10 , __UpperCamelCase :Dict=0.02 , __UpperCamelCase :List[Any]=[1, 16, 4, 4] , __UpperCamelCase :List[str]=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A = (self.image_size // 32) ** 2 A = num_patches + 1 def lowerCamelCase ( self :Optional[int] ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self :List[str] ): A = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__UpperCamelCase , ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] ): A = ViTHybridModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[Any] ): A = self.type_sequence_label_size A = ViTHybridForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() A = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase ( self :str ): A = self.prepare_config_and_inputs() A, A, A = config_and_inputs A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCamelCase = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def lowerCamelCase ( self :Dict ): A = ViTHybridModelTester(self ) A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def lowerCamelCase ( self :List[str] ): pass def lowerCamelCase ( self :Dict ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowerCamelCase ( self :Any ): A, A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(__UpperCamelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowerCamelCase ( self :Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowerCamelCase ( self :Union[str, Any] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def lowerCamelCase ( self :List[str] ): A, A = self.model_tester.prepare_config_and_inputs_for_common() A = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: A = model_class(config=__UpperCamelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def lowerCamelCase ( self :Tuple ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = ViTHybridModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def A__ ( ): A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch 
@require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self :Optional[int] ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self :Optional[Any] ): A = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __UpperCamelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): A = model(**__UpperCamelCase ) # verify the logits A = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) A = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) ) @slow @require_accelerate def lowerCamelCase ( self :Dict ): A = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) A = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) A = prepare_img() A = image_processor(images=__UpperCamelCase , return_tensors="pt" ) A = model(**__UpperCamelCase ) A = outputs.logits # model predicts one of the 1000 ImageNet classes A = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
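The model tester above derives its sequence length from the backbone's output stride rather than the patch size, as its inline comment explains. A standalone sketch of that arithmetic:

def vit_hybrid_seq_length(image_size: int) -> int:
    # Effective output stride of 32 -> (H // 32) ** 2 patch tokens + one [CLS].
    return (image_size // 32) ** 2 + 1


assert vit_hybrid_seq_length(64) == 5     # matches the tester's image_size=64
assert vit_hybrid_seq_length(384) == 145  # a 384 x 384 input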
292
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : List[Any] = logging.get_logger(__name__) _snake_case : int = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = '''marian''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ): A = vocab_size A = decoder_vocab_size or vocab_size A = max_position_embeddings A = d_model A = encoder_ffn_dim A = encoder_layers A = encoder_attention_heads A = decoder_ffn_dim A = decoder_layers A = decoder_attention_heads A = dropout A = attention_dropout A = activation_dropout A = activation_function A = init_std A = encoder_layerdrop A = decoder_layerdrop A = use_cache A = encoder_layers A = scale_embedding # scale factor will be sqrt(d_model) if True A = share_encoder_decoder_embeddings super().__init__( pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) class _UpperCAmelCase ( lowercase_ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A = {0: "batch"} A = {0: "batch", 1: "past_decoder_sequence + sequence"} else: A = {0: "batch", 1: "decoder_sequence"} A = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} else: A = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self :List[str] ): if self.task in ["default", "seq2seq-lm"]: A = super().outputs else: A = super(__UpperCamelCase , self ).outputs if self.use_past: A, A = self.num_layers for i in range(__UpperCamelCase ): A = {0: "batch", 2: "past_sequence + sequence"} A = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Generate decoder inputs A = seq_length if not self.use_past else 1 A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} A = dict(**__UpperCamelCase , **__UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A, A = common_inputs["input_ids"].shape A = common_inputs["decoder_input_ids"].shape[1] A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A = decoder_seq_length + 3 A = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 ) A = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A, A = self.num_layers A = min(__UpperCamelCase , __UpperCamelCase ) A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase ), ) ) # TODO: test this. 
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__UpperCamelCase , __UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) ) return common_inputs def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): A = self._generate_dummy_inputs_for_encoder_and_decoder( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch A, A = common_inputs["input_ids"].shape # Not using the same length for past_key_values A = seqlen + 2 A, A = self.num_layers A, A = self.num_attention_heads A = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A = common_inputs["attention_mask"].dtype A = torch.cat( [common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase ) ] return common_inputs def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A = tokenizer.num_special_tokens_to_add(__UpperCamelCase ) A = compute_effective_axis_dimension( __UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: A = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) else: A = self._generate_dummy_inputs_for_causal_lm( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) return common_inputs def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ): if self.task in ["default", "seq2seq-lm"]: A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: A = super(__UpperCamelCase , self )._flatten_past_key_values_( __UpperCamelCase , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @property def lowerCamelCase ( self :List[str] ): return 1e-4
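The dummy past_key_values assembled in the generators above follow the standard per-layer cache layout of (batch, num_heads, past_seq_len, head_dim). A minimal shape sketch with illustrative dimensions:

import torch

batch, num_heads, past_len, d_model = 2, 8, 10, 512
head_dim = d_model // num_heads
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),  # key cache
        torch.zeros(batch, num_heads, past_len, head_dim),  # value cache
    )
    for _ in range(6)  # one (key, value) pair per decoder layer
]
print(past_key_values[0][0].shape)  # torch.Size([2, 8, 10, 64])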
292
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase : str = logging.get_logger(__name__) lowercase : str = '''▁''' lowercase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowercase : Tuple = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } lowercase : List[Any] = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class UpperCAmelCase_ ( __lowerCAmelCase ): '''simple docstring''' A : Union[str, Any] = VOCAB_FILES_NAMES A : List[Any] = PRETRAINED_VOCAB_FILES_MAP A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Any = ['''input_ids''', '''attention_mask'''] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> None: # Mask token behave like a normal word, i.e. include the space before it snake_case_ : Optional[int] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token snake_case_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , ) snake_case_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCAmelCase_ ) ) snake_case_ : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token snake_case_ : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case_ : Any = 1 snake_case_ : Dict = len(self.sp_model ) + self.fairseq_offset snake_case_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> int: snake_case_ : Dict = self.__dict__.copy() snake_case_ : Any = None snake_case_ : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , _SCREAMING_SNAKE_CASE ) -> Dict: snake_case_ : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): snake_case_ : Dict = {} snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Union[str, Any] = [self.cls_token_id] snake_case_ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: snake_case_ : Optional[int] = [self.sep_token_id] snake_case_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> List[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCAmelCase ( self ) -> Optional[int]: snake_case_ : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ : Any = self.sp_model.PieceToId(lowerCAmelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ : Any = "".join(lowerCAmelCase_ ).replace(lowerCAmelCase_ , " " ).strip() return out_string def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : List[Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if 
filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase_ , "wb" ) as fi: snake_case_ : Dict = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase_ ) return (out_vocab_file,)
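# Hedged usage sketch (added for illustration): this file is the XLM-RoBERTa
# sentencepiece tokenizer; it is exercised here through its released checkpoint
# (requires network access and the sentencepiece package).
def _xlm_roberta_tokenizer_demo():
    from transformers import XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tokenizer("Hello world!")["input_ids"]
    return ids  # starts with <s> (id 0) and ends with </s> (id 2)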
import datasets from .evaluate import evaluate lowercase : Dict = '''\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } ''' lowercase : int = ''' This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. ''' lowercase : int = ''' Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the CUAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer \'aupr\': Area Under the Precision-Recall curve \'prec_at_80_recall\': Precision at 80% recall \'prec_at_90_recall\': Precision at 90% recall Examples: >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}] >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}] >>> cuad_metric = datasets.load_metric("cuad") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ : Union[str, Any] = {prediction["id"]: 
prediction["prediction_text"] for prediction in predictions} snake_case_ : Optional[Any] = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] snake_case_ : Any = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power iteration: estimate the largest eigenvalue of `input_matrix` and its
    eigenvector, starting from an initial `vector`.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
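# Brief usage sketch (added for illustration): estimate the dominant eigenvalue of a
# small symmetric matrix from a random starting vector.
def _power_iteration_demo() -> None:
    rng = np.random.default_rng(0)
    matrix = np.array([[2.0, 1.0], [1.0, 3.0]])
    eigen_value, _ = power_iteration(matrix, rng.random(2))
    # The dominant eigenvalue of [[2, 1], [1, 3]] is (5 + sqrt(5)) / 2, about 3.618.
    assert abs(eigen_value - (5 + 5**0.5) / 2) < 1e-6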
def hexagonal_numbers(length: int) -> list[int]:
    """Generate the first `length` hexagonal numbers, n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = StableDiffusionSAGPipeline SCREAMING_SNAKE_CASE_ : List[str] = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ : str = False def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :List[str] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) __SCREAMING_SNAKE_CASE :Dict = DDIMScheduler( beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :Dict = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :List[str] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) __SCREAMING_SNAKE_CASE :List[str] = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE :int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=0 ) -> Dict: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE :List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __SCREAMING_SNAKE_CASE :Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = { '''prompt''': '''.''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 1.0, '''sag_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE( unittest.TestCase ): def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ) -> Tuple: """simple docstring""" 
__SCREAMING_SNAKE_CASE :Tuple = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __SCREAMING_SNAKE_CASE :Tuple = sag_pipe.to(SCREAMING_SNAKE_CASE__ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = '''.''' __SCREAMING_SNAKE_CASE :Any = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :Dict = sag_pipe( [prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='''np''' ) __SCREAMING_SNAKE_CASE :str = output.images __SCREAMING_SNAKE_CASE :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE :int = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def _UpperCamelCase ( self ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __SCREAMING_SNAKE_CASE :Optional[int] = sag_pipe.to(SCREAMING_SNAKE_CASE__ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = '''.''' __SCREAMING_SNAKE_CASE :Any = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :Dict = sag_pipe( [prompt] ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='''np''' ) __SCREAMING_SNAKE_CASE :Optional[int] = output.images __SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE :List[str] = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE :int = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __SCREAMING_SNAKE_CASE :Optional[int] = sag_pipe.to(SCREAMING_SNAKE_CASE__ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = '''.''' __SCREAMING_SNAKE_CASE :List[str] = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE :Optional[int] = sag_pipe( [prompt] ,width=7_68 ,height=5_12 ,generator=SCREAMING_SNAKE_CASE__ ,guidance_scale=7.5 ,sag_scale=1.0 ,num_inference_steps=20 ,output_type='''np''' ,) __SCREAMING_SNAKE_CASE :List[str] = output.images assert image.shape == (1, 5_12, 7_68, 3)
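# Hedged usage sketch (added for illustration, not part of the test suite): minimal
# inference with self-attention guidance; assumes a CUDA device and downloaded weights.
def _sag_inference_demo():
    sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    sag_pipe = sag_pipe.to("cuda")
    return sag_pipe("a photo of an astronaut", sag_scale=0.75).images[0]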
"""simple docstring""" import colorsys from PIL import Image # type: ignore def __lowerCamelCase ( a_ : float , a_ : float , a_ : int ) -> float: __SCREAMING_SNAKE_CASE :List[Any] = x __SCREAMING_SNAKE_CASE :List[Any] = y for step in range(a_ ): # noqa: B007 __SCREAMING_SNAKE_CASE :Dict = a * a - b * b + x __SCREAMING_SNAKE_CASE :Tuple = 2 * a * b + y __SCREAMING_SNAKE_CASE :Dict = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def __lowerCamelCase ( a_ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (2_55, 2_55, 2_55) def __lowerCamelCase ( a_ : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(a_ , 1 , 1 ) ) def __lowerCamelCase ( a_ : int = 8_00 , a_ : int = 6_00 , a_ : float = -0.6 , a_ : float = 0 , a_ : float = 3.2 , a_ : int = 50 , a_ : bool = True , ) -> Image.Image: __SCREAMING_SNAKE_CASE :Optional[int] = Image.new('''RGB''' , (image_width, image_height) ) __SCREAMING_SNAKE_CASE :Tuple = img.load() # loop through the image-coordinates for image_x in range(a_ ): for image_y in range(a_ ): # determine the figure-coordinates based on the image-coordinates __SCREAMING_SNAKE_CASE :Dict = figure_width / image_width * image_height __SCREAMING_SNAKE_CASE :str = figure_center_x + (image_x / image_width - 0.5) * figure_width __SCREAMING_SNAKE_CASE :Tuple = figure_center_y + (image_y / image_height - 0.5) * figure_height __SCREAMING_SNAKE_CASE :List[Any] = get_distance(a_ , a_ , a_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __SCREAMING_SNAKE_CASE :Optional[int] = get_color_coded_rgb(a_ ) else: __SCREAMING_SNAKE_CASE :Optional[Any] = get_black_and_white_rgb(a_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowerCamelCase_ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Project Euler 205: probability that Peter (nine 4-sided dice) rolls a higher
    total than Colin (six 6-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) scan: for each element, search the rest of the list."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but using enumerate and slicing instead of index math."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, scanning the array right to left."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
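# Hedged usage sketch (added for illustration): the accumulate-then-apply loop that the
# tests above exercise, written as a standalone helper.
def _gradient_accumulation_demo():
    if not is_tf_available():
        return None
    accumulator = GradientAccumulator()
    variable = tf.Variable([1.0, 2.0])
    optimizer, _ = create_optimizer(init_lr=5e-5, num_train_steps=10, num_warmup_steps=2)
    for grad in ([0.1, 0.1], [0.3, -0.1]):
        accumulator([tf.constant(grad)])
    optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
    accumulator.reset()
    return variable.numpy()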
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Union[str, Any] = { "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str = [ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys A__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __a (unittest.TestCase ): def __init__( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple=3 , __magic_name__ : Tuple=32 , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[Any]=10 , __magic_name__ : str=[10, 20, 30, 40] , __magic_name__ : str=[1, 1, 2, 1] , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Dict="relu" , __magic_name__ : Any=3 , __magic_name__ : List[str]=None , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : str = parent UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Optional[int] = num_channels UpperCAmelCase_ : Any = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : Any = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : List[str] = num_labels UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : int = len(__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = self.get_config() return config, pixel_values def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = FlaxRegNetModel(config=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : str = FlaxRegNetForImageClassification(config=__magic_name__ ) UpperCAmelCase_ : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : str = config_and_inputs UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __a (lowerCamelCase , unittest.TestCase ): __a : Optional[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __a : int = False __a 
: str = False __a : List[str] = False def UpperCAmelCase__ ( self : List[Any] ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[int] = FlaxRegNetModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" return def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ ) UpperCAmelCase_ : List[Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Dict = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" def check_hidden_states_output(__magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ): UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ ) UpperCAmelCase_ : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) UpperCAmelCase_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : str = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Any = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Any = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : str = self._prepare_for_class(__magic_name__ , 
__magic_name__ ) UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ ) @jax.jit def model_jitted(__magic_name__ : Optional[int] , **__magic_name__ : Optional[Any] ): return model(pixel_values=__magic_name__ , **__magic_name__ ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ : Dict = model_jitted(**__magic_name__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ : Any = model_jitted(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for jitted_output, output in zip(__magic_name__ , __magic_name__ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase_ ( ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __a (unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) UpperCAmelCase_ : Tuple = self.default_image_processor UpperCAmelCase_ : Tuple = prepare_img() UpperCAmelCase_ : Union[str, Any] = image_processor(images=__magic_name__ , return_tensors='''np''' ) UpperCAmelCase_ : int = model(**__magic_name__ ) # verify the logits UpperCAmelCase_ : int = (1, 10_00) self.assertEqual(outputs.logits.shape , __magic_name__ ) UpperCAmelCase_ : Optional[int] = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
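    # Brief evaluation sketch (added for illustration): score the multi-step forecast
    # on the held-out window with mean squared error.
    mse = float(np.mean((pred - y_test) ** 2))
    print(f"test MSE: {mse:.6f}")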
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count the lamina sizes t <= t_limit that can be formed in
    between 1 and 10 distinct ways.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)


if __name__ == "__main__":
    print(f"{solution() = }")
"""BigBird model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
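# Hedged usage sketch (added for illustration): instantiating the config with sparse
# attention and reading back a default field.
def _bigbird_config_demo() -> BigBirdConfig:
    config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    assert config.max_position_embeddings == 4096  # default
    return config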
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : Optional[Any] = do_center_crop UpperCAmelCase_ : Optional[int] = crop_size UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Optional[Any] = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : List[Any] = do_reduce_labels def A ( self : Union[str, Any] ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCAmelCase ( ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] ) UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCAmelCase ( ) -> Any: UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] ) UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] ) UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] ) UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = BeitImageProcessor if is_vision_available() else None def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> Optional[int]: 
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _A ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _A ) def A ( self : Optional[Any] ) -> Any: pass def A ( self : List[str] ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) UpperCAmelCase_ : Union[str, Any] = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs() UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def A ( self : List[Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCAmelCase : Optional[int] =logging.get_logger(__name__) class a_ ( _lowerCAmelCase ): __A = ["input_features"] def __init__( self : Any , lowercase : Tuple=80 , lowercase : Optional[int]=16_000 , lowercase : Optional[Any]=160 , lowercase : Optional[int]=30 , lowercase : List[Any]=400 , lowercase : Dict=0.0 , lowercase : Tuple=False , **lowercase : Optional[int] , ): """simple docstring""" super().__init__( feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , ) lowercase_ :Optional[int] = n_fft lowercase_ :List[Any] = hop_length lowercase_ :Tuple = chunk_length lowercase_ :List[str] = chunk_length * sampling_rate lowercase_ :Optional[Any] = self.n_samples // hop_length lowercase_ :Any = sampling_rate lowercase_ :List[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=lowercase , norm="slaney" , mel_scale="slaney" , ) def lowercase__ ( self : str , lowercase : np.array ): """simple docstring""" lowercase_ :Any = spectrogram( lowercase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) lowercase_ :Any = log_spec[:, :-1] lowercase_ :List[Any] = np.maximum(lowercase , log_spec.max() - 8.0 ) lowercase_ :Dict = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowercase__ ( lowercase : List[np.ndarray] , lowercase : List[np.ndarray] , lowercase : float = 0.0 ): """simple docstring""" if attention_mask is not None: lowercase_ :Optional[int] = np.array(lowercase , np.intaa ) lowercase_ :Any = [] for vector, length in zip(lowercase , attention_mask.sum(-1 ) ): lowercase_ :Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowercase_ :List[Any] = padding_value normed_input_values.append(lowercase ) else: lowercase_ :List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self : Tuple , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : bool = True , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[bool] = None , lowercase : Optional[str] = "max_length" , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , **lowercase : Union[str, Any] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. 
" "Failing to do so can result in silent errors that might be hard to debug." ) lowercase_ :List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) lowercase_ :Optional[Any] = is_batched_numpy or ( isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase_ :Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowercase , np.ndarray ): lowercase_ :List[Any] = np.asarray(lowercase , dtype=np.floataa ) elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ :Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ :Optional[int] = [np.asarray([raw_speech] ).T] lowercase_ :int = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding lowercase_ :Tuple = self.pad( lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase_ :Union[str, Any] = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) lowercase_ :List[Any] = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format lowercase_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) lowercase_ :List[str] = [self._np_extract_fbank_features(lowercase ) for waveform in input_features[0]] if isinstance(input_features[0] , lowercase ): lowercase_ :Tuple = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_features] else: lowercase_ :Union[str, Any] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase_ :Dict = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: lowercase_ :Tuple = padded_inputs.convert_to_tensors(lowercase ) return padded_inputs def lowercase__ ( self : List[str] ): """simple docstring""" lowercase_ :Union[str, Any] = copy.deepcopy(self.__dict__ ) lowercase_ :List[str] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
365
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : Tuple =logging.get_logger(__name__) lowerCAmelCase : str ={ '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class a_ ( _lowerCAmelCase ): __A = "cvt" def __init__( self : Tuple , lowercase : str=3 , lowercase : str=[7, 3, 3] , lowercase : List[str]=[4, 2, 2] , lowercase : Dict=[2, 1, 1] , lowercase : int=[64, 192, 384] , lowercase : Dict=[1, 3, 6] , lowercase : Dict=[1, 2, 10] , lowercase : Any=[4.0, 4.0, 4.0] , lowercase : Tuple=[0.0, 0.0, 0.0] , lowercase : List[str]=[0.0, 0.0, 0.0] , lowercase : List[str]=[0.0, 0.0, 0.1] , lowercase : Any=[True, True, True] , lowercase : Any=[False, False, True] , lowercase : Optional[Any]=["dw_bn", "dw_bn", "dw_bn"] , lowercase : int=[3, 3, 3] , lowercase : str=[1, 1, 1] , lowercase : List[Any]=[2, 2, 2] , lowercase : Tuple=[1, 1, 1] , lowercase : Optional[Any]=[1, 1, 1] , lowercase : str=0.02 , lowercase : str=1e-1_2 , **lowercase : str , ): """simple docstring""" super().__init__(**lowercase ) lowercase_ :List[Any] = num_channels lowercase_ :Union[str, Any] = patch_sizes lowercase_ :Tuple = patch_stride lowercase_ :List[Any] = patch_padding lowercase_ :List[Any] = embed_dim lowercase_ :Union[str, Any] = num_heads lowercase_ :Any = depth lowercase_ :str = mlp_ratio lowercase_ :List[str] = attention_drop_rate lowercase_ :List[Any] = drop_rate lowercase_ :Union[str, Any] = drop_path_rate lowercase_ :Any = qkv_bias lowercase_ :Dict = cls_token lowercase_ :int = qkv_projection_method lowercase_ :Union[str, Any] = kernel_qkv lowercase_ :Optional[Any] = padding_kv lowercase_ :Optional[Any] = stride_kv lowercase_ :Dict = padding_q lowercase_ :Any = stride_q lowercase_ :Dict = initializer_range lowercase_ :Optional[Any] = layer_norm_eps
147
0
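A hedged usage sketch for the Whisper feature extractor defined above; with the defaults shown there (80 mel bins, 30 s chunks, hop length 160 at 16 kHz), inputs are padded to 480,000 samples and yield 3,000 frames:

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000)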
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Run accelerate's pre-forward hook (if attached) before calling ``method``."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
129
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """pytorch""", """script""": """run_ddp.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """tensorflow""", """script""": """run_tf_dist.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7}, }, ]) class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='''utf-8''' ,check=__lowerCamelCase ,) assert hasattr(self ,'''env''' ) def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : Optional[int] = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}""" # distributed data settings lowerCAmelCase__ : Optional[Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__lowerCamelCase ,instance_count=__lowerCamelCase ,instance_type=self.instance_type ,debugger_hook_config=__lowerCamelCase ,hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__lowerCamelCase ,py_version='''py36''' ,) def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str: """simple docstring""" TrainingJobAnalytics(__lowerCamelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = self.create_estimator(__lowerCamelCase ) # run training estimator.fit() # result dataframe lowerCAmelCase__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) lowerCAmelCase__ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ : Optional[Any] = ( 
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" ,'''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,__lowerCamelCase )
129
1
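A minimal sketch of how the forward-hook decorator repaired above is meant to be used; the Model class is illustrative, and the hook attributes (_hf_hook.pre_forward) are attached by accelerate's offloading utilities:

import torch


class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    @apply_forward_hook
    def encode(self, x):
        # If accelerate attached an offload hook, `_hf_hook.pre_forward`
        # runs first and moves the weights onto the execution device.
        return self.linear(x)


output = Model().encode(torch.randn(2, 4))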
'''Find the n-th prime number.'''
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number."""
    count = 0
    number = 1
    # Handle 2 and 3 first, then step through odd candidates only.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
25
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__ = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__ = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__ = False @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return 32 @property def a ( self ): '''simple docstring''' return self.time_input_dim @property def a ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def a ( self ): '''simple docstring''' return 100 @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } _lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ ) return model @property def a ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def a ( self ): '''simple docstring''' torch.manual_seed(0 ) _lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : List[Any] = self.dummy_movq _lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , ) _lowerCAmelCase : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def a ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' _lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : Optional[Any] = 
floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( snake_case__ ) # create init_image _lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) ) # create mask _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase : Dict = 0 if str(snake_case__ ).startswith('mps' ): _lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ ) else: _lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) _lowerCAmelCase : Optional[int] = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def a ( self ): '''simple docstring''' _lowerCAmelCase : Dict = 'cpu' _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ ) _lowerCAmelCase : Optional[int] = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) ) _lowerCAmelCase : int = output.images _lowerCAmelCase : int = pipe( **self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0] _lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] _lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def a ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def a ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) _lowerCAmelCase : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : List[str] = 'a hat' _lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(snake_case__ ) _lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) _lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ ) pipeline.set_progress_bar_config(disable=snake_case__ ) _lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) _lowerCAmelCase , 
_lowerCAmelCase : Dict = pipe_prior( snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() _lowerCAmelCase : Optional[Any] = pipeline( image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) _lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(snake_case__ , snake_case__ )
25
1
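A quick cross-check for the prime solver repaired above, comparing against a naive trial-division count for small n:

def nth_prime_naive(n: int) -> int:
    found, candidate = 0, 1
    while found < n:
        candidate += 1
        if all(candidate % d for d in range(2, candidate)):
            found += 1
    return candidate


for n in (1, 6, 10, 25):
    assert solution(n) == nth_prime_naive(n)  # 2, 13, 29, 97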
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance (defers the sqrt until the very end)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force over the first ``points_counts`` points: O(n^2)."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """At most six strip neighbours need to be checked per point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force small sets
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # candidates lying within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
36
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
36
1
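A sanity check for the divide-and-conquer routine repaired above: on the module's own example points the brute-force minimum over all pairs is about 1.4142 (sqrt(2), from the pair (2, 3)-(3, 4)), and the recursive version should print the same value:

from itertools import combinations

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
brute = min(euclidean_distance_sqr(p, q) for p, q in combinations(pts, 2)) ** 0.5
print(brute)                                  # 1.4142135623730951
print(closest_pair_of_points(pts, len(pts)))  # expected to match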
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase: int = logging.get_logger(__name__) __lowercase: int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart __lowercase: Optional[int] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } __lowercase: Optional[Any] = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } @lru_cache() def SCREAMING_SNAKE_CASE__( ) -> Dict: '''simple docstring''' UpperCamelCase__ = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) UpperCamelCase__ = bs[:] UpperCamelCase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCamelCase ) cs.append(2**8 + n ) n += 1 UpperCamelCase__ = [chr(_UpperCamelCase ) for n in cs] return dict(zip(_UpperCamelCase , _UpperCamelCase ) ) def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> int: '''simple docstring''' UpperCamelCase__ = set() UpperCamelCase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase__ = char return pairs class UpperCAmelCase ( SCREAMING_SNAKE_CASE__): _lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : List[str] = ['input_ids', 'attention_mask'] def __init__( self : Optional[int], a_ : int, a_ : List[str], a_ : Optional[int]="replace", a_ : int="<s>", a_ : List[Any]="</s>", a_ : List[Any]="</s>", a_ : Tuple="<s>", a_ : Tuple="<unk>", a_ : Any="<pad>", a_ : Optional[int]="<mask>", a_ : Any=False, **a_ : Optional[Any], ): """simple docstring""" UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else bos_token UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else eos_token UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else sep_token UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else cls_token UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else unk_token UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token super().__init__( errors=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, cls_token=a_, pad_token=a_, mask_token=a_, add_prefix_space=a_, **a_, ) with open(a_, encoding="utf-8" ) as vocab_handle: UpperCamelCase__ = json.load(a_ ) UpperCamelCase__ = {v: k for k, v in self.encoder.items()} UpperCamelCase__ = errors # how to handle errors in decoding UpperCamelCase__ = bytes_to_unicode() UpperCamelCase__ = {v: k for k, v in self.byte_encoder.items()} with open(a_, encoding="utf-8" ) as merges_handle: UpperCamelCase__ = merges_handle.read().split("\n" )[1:-1] UpperCamelCase__ = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) ) UpperCamelCase__ = {} UpperCamelCase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def lowercase_ ( self : int ): """simple docstring""" return len(self.encoder ) def lowercase_ ( self : List[str] ): """simple docstring""" return dict(self.encoder, **self.added_tokens_encoder ) def lowercase_ ( self : List[Any], a_ : int ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase__ = tuple(a_ ) UpperCamelCase__ = get_pairs(a_ ) if not pairs: return token while True: UpperCamelCase__ = min(a_, key=lambda a_ : self.bpe_ranks.get(a_, float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase__ , UpperCamelCase__ = bigram UpperCamelCase__ = [] UpperCamelCase__ = 0 while i < len(a_ ): try: UpperCamelCase__ = word.index(a_, a_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase__ = j if word[i] == first and i < len(a_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase__ = tuple(a_ ) UpperCamelCase__ = new_word if len(a_ ) == 1: break else: UpperCamelCase__ = get_pairs(a_ ) UpperCamelCase__ = " ".join(a_ ) UpperCamelCase__ = word return word def lowercase_ ( self : Any, a_ : List[str] ): """simple docstring""" UpperCamelCase__ = [] for token in re.findall(self.pat, a_ ): UpperCamelCase__ = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(" " ) ) return bpe_tokens def lowercase_ ( self : Any, a_ : Optional[int] ): """simple docstring""" return self.encoder.get(a_, self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Optional[int], a_ : str ): """simple docstring""" return self.decoder.get(a_ ) def lowercase_ ( self : List[str], a_ : List[str] ): """simple docstring""" UpperCamelCase__ = "".join(a_ ) UpperCamelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors ) return text def lowercase_ ( self : Tuple, a_ : str, a_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCamelCase__ = os.path.join( a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase__ = os.path.join( a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) 
with open(a_, "w", encoding="utf-8" ) as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=a_, ensure_ascii=a_ ) + "\n" ) UpperCamelCase__ = 0 with open(a_, "w", encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda a_ : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' " Please check that the tokenizer is not corrupted!" ) UpperCamelCase__ = token_index writer.write(" ".join(a_ ) + "\n" ) index += 1 return vocab_file, merge_file def lowercase_ ( self : Optional[Any], a_ : List[int], a_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] UpperCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowercase_ ( self : str, a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ ) if token_ids_a is None: return [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1] def lowercase_ ( self : Tuple, a_ : List[int], a_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase__ = [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : Tuple, a_ : Dict, a_ : str=False, **a_ : Optional[Any] ): """simple docstring""" UpperCamelCase__ = kwargs.pop("add_prefix_space", self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a_ ) > 0 and not text[0].isspace()): UpperCamelCase__ = " " + text return (text, kwargs)
368
'''Recover the missing semiconductor carrier concentration from the mass-action law.'''
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
31
0
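A worked check of the mass-action law n * p = n_i**2 that the function above rearranges (the numbers are illustrative):

# Given hole and intrinsic concentrations, recover n = n_i**2 / p.
kind, value = carrier_concentration(electron_conc=0, hole_conc=20, intrinsic_conc=30)
assert (kind, value) == ("electron_conc", 45.0)  # 30**2 / 20

# Given n and p, recover n_i = sqrt(n * p).
kind, value = carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
assert (kind, value) == ("intrinsic_conc", 50.0)  # sqrt(25 * 100)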
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) __lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } __lowercase = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __UpperCamelCase :str = '''lm_head''' __UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __UpperCamelCase :int = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __UpperCamelCase :Any = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __UpperCamelCase :Optional[Any] = value elif weight_type == "weight_g": __UpperCamelCase :Any = value elif weight_type == "weight_v": __UpperCamelCase :Any = value elif weight_type == "bias": __UpperCamelCase :Any = value else: __UpperCamelCase :Optional[Any] = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Any = [] __UpperCamelCase :Tuple = fairseq_model.state_dict() __UpperCamelCase :Tuple = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase :Optional[Any] = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCamelCase :List[Any] = True else: for key, mapped_key in MAPPING.items(): __UpperCamelCase :Optional[int] = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __UpperCamelCase :Optional[int] = True if "*" in mapped_key: __UpperCamelCase :str = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2] __UpperCamelCase :Tuple = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __UpperCamelCase :Union[str, Any] = '''weight_g''' elif "weight_v" in name: __UpperCamelCase :Optional[int] = '''weight_v''' elif "bias" in name: __UpperCamelCase :str = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase :int = '''weight''' else: __UpperCamelCase :str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Optional[int] = full_name.split('''conv_layers.''' )[-1] __UpperCamelCase :Optional[int] = name.split('''.''' ) __UpperCamelCase :int = int(items[0] ) __UpperCamelCase :List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __UpperCamelCase :Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __UpperCamelCase :int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __UpperCamelCase :str = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __UpperCamelCase :List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ): '''simple docstring''' if config_path is not None: __UpperCamelCase :str = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :Optional[Any] = UniSpeechConfig() if is_finetuned: if dict_path: __UpperCamelCase :Tuple = Dictionary.load_from_json(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase :Tuple = target_dict.pad_index __UpperCamelCase :Any = target_dict.bos_index __UpperCamelCase :List[str] = target_dict.eos_index __UpperCamelCase :int = len(target_dict.symbols ) __UpperCamelCase :Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[str] = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase :int = 42 __UpperCamelCase :str = 43 with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = WavaVecaPhonemeCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :int = True if config.feat_extract_norm == '''layer''' else False __UpperCamelCase :List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :List[str] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) __UpperCamelCase :str = UniSpeechForCTC(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :List[Any] = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE ) if is_finetuned: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCamelCase :Optional[int] = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, 
help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowercase = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
43
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase_ = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
309
0
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( __snake_case ): lowercase : Optional[int] = (PNDMScheduler,) lowercase : Optional[int] = (('num_inference_steps', 5_0),) def a__ ( self :List[Any] ,**_UpperCamelCase :List[Any] ): snake_case_ : Optional[Any] = { """num_train_timesteps""": 1_0_0_0, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCamelCase ) return config def a__ ( self :Optional[int] ,_UpperCamelCase :Any=0 ,**_UpperCamelCase :Any ): snake_case_ : str = dict(self.forward_default_kwargs ) snake_case_ : str = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase ) snake_case_ : List[Any] = self.dummy_sample snake_case_ : Tuple = 0.1 * sample snake_case_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case_ : Optional[Any] = self.get_scheduler_config(**_UpperCamelCase ) snake_case_ : Tuple = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(_UpperCamelCase ) # copy over dummy past residuals snake_case_ : List[str] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCamelCase ) snake_case_ : Optional[Any] = scheduler_class.from_pretrained(_UpperCamelCase ) new_scheduler.set_timesteps(_UpperCamelCase ) # copy over dummy past residuals snake_case_ : int = dummy_past_residuals[:] snake_case_ : Dict = scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : int = new_scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" snake_case_ : int = scheduler.step_plms(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : Dict = new_scheduler.step_plms(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def a__ ( self :int ): pass def a__ ( self :Any ,_UpperCamelCase :List[str]=0 ,**_UpperCamelCase :Union[str, Any] ): snake_case_ : Dict = dict(self.forward_default_kwargs ) snake_case_ : Any = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase ) snake_case_ : List[str] = self.dummy_sample snake_case_ : List[Any] = 0.1 * sample snake_case_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: snake_case_ : Optional[int] = self.get_scheduler_config() snake_case_ : Any = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(_UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) snake_case_ : List[str] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCamelCase ) snake_case_ : str = scheduler_class.from_pretrained(_UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) snake_case_ : Union[str, Any] = dummy_past_residuals[:] snake_case_ : Optional[Any] = scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : List[Any] = new_scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase 
,_UpperCamelCase ,**_UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" snake_case_ : str = scheduler.step_plms(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : int = new_scheduler.step_plms(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def a__ ( self :List[Any] ,**_UpperCamelCase :Optional[Any] ): snake_case_ : Dict = self.scheduler_classes[0] snake_case_ : int = self.get_scheduler_config(**_UpperCamelCase ) snake_case_ : Union[str, Any] = scheduler_class(**_UpperCamelCase ) snake_case_ : Optional[int] = 1_0 snake_case_ : Optional[int] = self.dummy_model() snake_case_ : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(_UpperCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): snake_case_ : int = model(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): snake_case_ : List[str] = model(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Any = scheduler.step_plms(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ).prev_sample return sample def a__ ( self :Optional[Any] ): snake_case_ : Any = dict(self.forward_default_kwargs ) snake_case_ : Optional[int] = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase ) for scheduler_class in self.scheduler_classes: snake_case_ : List[Any] = self.get_scheduler_config() snake_case_ : Optional[Any] = scheduler_class(**_UpperCamelCase ) snake_case_ : List[str] = self.dummy_sample snake_case_ : Union[str, Any] = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCamelCase ,"""set_timesteps""" ): scheduler.set_timesteps(_UpperCamelCase ) elif num_inference_steps is not None and not hasattr(_UpperCamelCase ,"""set_timesteps""" ): snake_case_ : int = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) snake_case_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] snake_case_ : Tuple = dummy_past_residuals[:] snake_case_ : Tuple = scheduler.step_prk(_UpperCamelCase ,0 ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : int = scheduler.step_prk(_UpperCamelCase ,1 ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) snake_case_ : str = scheduler.step_plms(_UpperCamelCase ,0 ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample snake_case_ : List[str] = scheduler.step_plms(_UpperCamelCase ,1 ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def a__ ( self :int ): for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCamelCase ) def a__ ( self :Tuple ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCamelCase ) snake_case_ : int = self.scheduler_classes[0] snake_case_ : Tuple = self.get_scheduler_config(steps_offset=1 ) snake_case_ : Union[str, Any] = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps ,torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,) def a__ 
( self :Dict ): for beta_start, beta_end in zip([0.00_01, 0.0_01] ,[0.0_02, 0.02] ): self.check_over_configs(beta_start=_UpperCamelCase ,beta_end=_UpperCamelCase ) def a__ ( self :List[str] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCamelCase ) def a__ ( self :Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase ) def a__ ( self :Tuple ): for t in [1, 5, 1_0]: self.check_over_forward(time_step=_UpperCamelCase ) def a__ ( self :Any ): for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=_UpperCamelCase ) def a__ ( self :Union[str, Any] ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 snake_case_ : int = 2_7 for scheduler_class in self.scheduler_classes: snake_case_ : Optional[Any] = self.dummy_sample snake_case_ : Tuple = 0.1 * sample snake_case_ : Tuple = self.get_scheduler_config() snake_case_ : Optional[Any] = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(_UpperCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): snake_case_ : Optional[Any] = scheduler.step_prk(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ).prev_sample def a__ ( self :Union[str, Any] ): with self.assertRaises(_UpperCamelCase ): snake_case_ : Dict = self.scheduler_classes[0] snake_case_ : str = self.get_scheduler_config() snake_case_ : Any = scheduler_class(**_UpperCamelCase ) scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample def a__ ( self :List[str] ): snake_case_ : Any = self.full_loop() snake_case_ : Tuple = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ : Dict = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def a__ ( self :List[Any] ): snake_case_ : Tuple = self.full_loop(prediction_type="""v_prediction""" ) snake_case_ : Tuple = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ : Optional[int] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def a__ ( self :List[str] ): # We specify different beta, so that the first alpha is 0.99 snake_case_ : Tuple = self.full_loop(set_alpha_to_one=_UpperCamelCase ,beta_start=0.01 ) snake_case_ : Optional[Any] = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ : Tuple = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def a__ ( self :str ): # We specify different beta, so that the first alpha is 0.99 snake_case_ : Tuple = self.full_loop(set_alpha_to_one=_UpperCamelCase ,beta_start=0.01 ) snake_case_ : Union[str, Any] = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ : List[Any] = torch.mean(torch.abs(_UpperCamelCase ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
361
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
0
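A hedged sketch of the PRK/PLMS denoising loop the scheduler tests above exercise, using diffusers' public PNDMScheduler API; the zero tensor stands in for a real UNet prediction:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample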
"""simple docstring""" from __future__ import annotations lowercase_ = 1.6_021e-19 # units = C def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ) -> tuple[str, float]: if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
45
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
45
1
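# Illustrative aside: a minimal use of the electric_conductivity helper
# earlier in this record. The inputs are arbitrary example values, not real
# material data; exactly one of the three arguments must be zero.
name, value = electric_conductivity(conductivity=25.0, electron_conc=0, mobility=100.0)
assert name == "electron_conc"  # solved as conductivity / (mobility * ELECTRON_CHARGE)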
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowercase__ = re.compile(r'\s+') def __a ( _SCREAMING_SNAKE_CASE ) ->str: return {"hash": hashlib.mda(re.sub(_SCREAMING_SNAKE_CASE , '' , example['content'] ).encode('utf-8' ) ).hexdigest()} def __a ( _SCREAMING_SNAKE_CASE ) ->Dict: a__: Dict = [len(_SCREAMING_SNAKE_CASE ) for line in example['content'].splitlines()] return {"line_mean": np.mean(_SCREAMING_SNAKE_CASE ), "line_max": max(_SCREAMING_SNAKE_CASE )} def __a ( _SCREAMING_SNAKE_CASE ) ->Any: a__: List[Any] = np.mean([c.isalnum() for c in example['content']] ) return {"alpha_frac": alpha_frac} def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if example["hash"] in uniques: uniques.remove(example['hash'] ) return True else: return False def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ) ->Union[str, Any]: a__: Optional[Any] = ['auto-generated', 'autogenerated', 'automatically generated'] a__: List[Any] = example['content'].splitlines() for _, line in zip(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=0.05 ) ->Union[str, Any]: a__: Optional[int] = ['unit tests', 'test file', 'configuration file'] a__: Optional[Any] = example['content'].splitlines() a__: Any = 0 a__: Optional[Any] = 0 # first test for _, line in zip(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test a__: List[Any] = example['content'].count('\n' ) a__: List[Any] = int(coeff * nlines ) for line in lines: count_config += line.lower().count('config' ) count_test += line.lower().count('test' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __a ( _SCREAMING_SNAKE_CASE ) ->Dict: a__: Optional[Any] = ['def ', 'class ', 'for ', 'while '] a__: str = example['content'].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=4 ) ->Union[str, Any]: a__: str = example['content'].splitlines() a__: List[str] = 0 for line in lines: counter += line.lower().count('=' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __a ( _SCREAMING_SNAKE_CASE ) ->str: a__: Optional[Any] = tokenizer(example['content'] , truncation=_SCREAMING_SNAKE_CASE )['input_ids'] a__: List[str] = len(example['content'] ) / len(_SCREAMING_SNAKE_CASE ) return {"ratio": ratio} def __a ( _SCREAMING_SNAKE_CASE ) ->str: a__: int = {} results.update(get_hash(_SCREAMING_SNAKE_CASE ) ) results.update(line_stats(_SCREAMING_SNAKE_CASE ) ) results.update(alpha_stats(_SCREAMING_SNAKE_CASE ) ) results.update(char_token_ratio(_SCREAMING_SNAKE_CASE ) ) results.update(is_autogenerated(_SCREAMING_SNAKE_CASE ) ) results.update(is_config_or_test(_SCREAMING_SNAKE_CASE ) ) results.update(has_no_keywords(_SCREAMING_SNAKE_CASE ) ) 
results.update(has_few_assignments(_SCREAMING_SNAKE_CASE ) ) return results def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: if not check_uniques(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __a ( _SCREAMING_SNAKE_CASE ) ->Tuple: with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f_in: with gzip.open(str(_SCREAMING_SNAKE_CASE ) + '.gz' , 'wb' , compresslevel=6 ) as f_out: shutil.copyfileobj(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) os.unlink(_SCREAMING_SNAKE_CASE ) # Settings lowercase__ = HfArgumentParser(PreprocessingArguments) lowercase__ = parser.parse_args() if args.num_workers is None: lowercase__ = multiprocessing.cpu_count() lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowercase__ = time.time() lowercase__ = load_dataset(args.dataset_name, split='train') print(f"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowercase__ = time.time() lowercase__ = ds.map(preprocess, num_proc=args.num_workers) print(f"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowercase__ = set(ds.unique('hash')) lowercase__ = len(uniques) / len(ds) print(f"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowercase__ = time.time() lowercase__ = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args}) print(f"Time to filter dataset: {time.time()-t_start:.2f}") print(f"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowercase__ = time.time() lowercase__ , lowercase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(f"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowercase__ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / 'duplicate_clusters.json', 'w') as f: json.dump(duplicate_clusters, f) lowercase__ = output_dir / 'data' data_dir.mkdir(exist_ok=True) lowercase__ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowercase__ = str(data_dir / f"file-{file_number+1:012}.json") lowercase__ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f"Time to save dataset: {time.time()-t_start:.2f}")
203
"""simple docstring""" from __future__ import annotations class __snake_case : def __init__( self , lowercase=None) -> Optional[Any]: '''simple docstring''' a__: int = data a__: str = None def __repr__( self) -> List[str]: '''simple docstring''' a__: Optional[Any] = [] a__: Union[str, Any] = self while temp: string_rep.append(f'{temp.data}') a__: Tuple = temp.next return "->".join(lowercase) def __a ( _SCREAMING_SNAKE_CASE ) ->str: if not elements_list: raise Exception('The Elements List is empty' ) a__: Any = Node(elements_list[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): a__: Optional[Any] = Node(elements_list[i] ) a__: Tuple = current.next return head def __a ( _SCREAMING_SNAKE_CASE ) ->None: if head_node is not None and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __a ( ) ->Optional[Any]: from doctest import testmod testmod() a__: Tuple = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(_SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
203
1
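# Illustrative aside: the reverse-print pattern from the linked-list sample
# above, on a tiny input.
head = make_linked_list([1, 2, 3])
print(head)          # 1->2->3
print_reverse(head)  # prints 3, then 2, then 1, one value per line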
from datetime import datetime as dt
import os

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()],
            key=lambda i: i.created_at,
            reverse=True,
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )


if __name__ == "__main__":
    main()
209
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: _a = None _a = logging.get_logger(__name__) _a = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _a = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } _a = { "xlnet-base-cased": None, "xlnet-large-cased": None, } _a = "▁" # Segments (not really needed) _a = 0 _a = 1 _a = 2 _a = 3 _a = 4 class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = """left""" lowerCAmelCase_ = XLNetTokenizer def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<sep>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<cls>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=["<eop>", "<eod>"] , **__lowerCAmelCase , ): '''simple docstring''' lowerCamelCase__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token super().__init__( vocab_file=__lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , ) lowerCamelCase__ = 3 lowerCamelCase__ = do_lower_case lowerCamelCase__ = remove_space lowerCamelCase__ = keep_accents lowerCamelCase__ = vocab_file lowerCamelCase__ = False if not self.vocab_file else True def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowerCAmelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase__ = os.path.join( __lowerCAmelCase , 
(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
209
1
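# Illustrative aside: XLNet's sequence layout as encoded by the fast
# tokenizer sample above. Unlike BERT, the classifier token goes at the END
# of the sequence; the ids below are placeholders.
sep, cls = [3], [4]
ids_0, ids_1 = [10, 11], [20]
assert ids_0 + sep + cls == [10, 11, 3, 4]                       # single sequence
assert ids_0 + sep + ids_1 + sep + cls == [10, 11, 3, 20, 3, 4]  # sequence pair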
import argparse from collections import defaultdict def _a ( lowerCamelCase: Union[str, Any] , lowerCamelCase: Tuple , lowerCamelCase: Union[str, Any] , lowerCamelCase: Optional[Any] , lowerCamelCase: Any ) -> Any: '''simple docstring''' __A = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(_snake_case , '''r''' ) as f: __A = f.readlines() __A = F"""class {class_name}(""" __A = F"""{4 * ' '}def {test_name}(""" __A = F"""{8 * ' '}{correct_line.split()[0]}""" __A = F"""{16 * ' '}{correct_line.split()[0]}""" __A = False __A = False __A = False __A = False __A = 0 __A = 0 __A = [] for line in lines: if line.startswith(_snake_case ): __A = True elif in_class and line.startswith(_snake_case ): __A = True elif in_class and in_func and (line.startswith(_snake_case ) or line.startswith(_snake_case )): __A = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: __A = True if in_class and in_func and in_line: if ")" not in line: continue else: __A = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * ' '}{correct_line}""" ) __A = False else: new_lines.append(_snake_case ) with open(_snake_case , '''w''' ) as f: for line in new_lines: f.write(_snake_case ) def _a ( lowerCamelCase: Dict , lowerCamelCase: Any=None ) -> List[Any]: '''simple docstring''' if fail is not None: with open(_snake_case , '''r''' ) as f: __A = {l.strip() for l in f.readlines()} else: __A = None with open(_snake_case , '''r''' ) as f: __A = f.readlines() __A = defaultdict(_snake_case ) for line in correct_lines: __A = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) snake_case__ = parser.parse_args() main(args.correct_filename, args.fail_filename)
352
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _a ( lowerCamelCase: List[str] ) -> Tuple: '''simple docstring''' __A = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __A = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __A = 4 __A = 48 __A = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __A = [6, 6, 6, 6] __A = 60 __A = [6, 6, 6, 6] __A = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __A = 4 __A = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __A = 1 __A = 1 __A = 1_26 __A = 7 __A = 255.0 __A = '''''' return config def _a ( lowerCamelCase: List[Any] , lowerCamelCase: Optional[int] ) -> Optional[int]: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __A = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: __A = name.replace('''layers''' , '''encoder.stages''' ) if "residual_group.blocks" in name: __A = name.replace('''residual_group.blocks''' , '''layers''' ) if "attn.proj" in name: __A = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __A = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __A = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __A = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __A = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __A = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: __A = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: __A = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: __A = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: __A = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: __A = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' ) if name == "norm.weight": __A = '''layernorm.weight''' if name == "norm.bias": __A = '''layernorm.bias''' if "conv_first" in name: __A = name.replace('''conv_first''' , '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __A = name.replace('''conv_last''' , '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __A = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' ) if "upsample.0" in name: __A = name.replace('''upsample.0''' , '''upsample.convolution_0''' ) if "upsample.2" in name: __A = name.replace('''upsample.2''' , '''upsample.convolution_1''' ) __A = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": __A = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' ) __A = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' ) else: pass else: __A = '''swin2sr.''' + name return name def _a ( lowerCamelCase: int , lowerCamelCase: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' for key in 
orig_state_dict.copy().keys(): __A = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: __A = key.split('''.''' ) __A = int(key_split[1] ) __A = int(key_split[4] ) __A = config.embed_dim if "weight" in key: __A = val[:dim, :] __A = val[dim : dim * 2, :] __A = val[-dim:, :] else: __A = val[:dim] __A = val[dim : dim * 2] __A = val[-dim:] pass else: __A = val return orig_state_dict def _a ( lowerCamelCase: List[Any] , lowerCamelCase: int , lowerCamelCase: Optional[int] ) -> List[Any]: '''simple docstring''' __A = get_config(lowerCamelCase ) __A = SwinaSRForImageSuperResolution(lowerCamelCase ) model.eval() __A = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='''cpu''' ) __A = convert_state_dict(lowerCamelCase , lowerCamelCase ) __A , __A = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) if len(lowerCamelCase ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(lowerCamelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values __A = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' __A = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert('''RGB''' ) __A = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __A = 1_26 if '''Jpeg''' in checkpoint_url else 2_56 __A = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __A = transforms(lowerCamelCase ).unsqueeze(0 ) if config.num_channels == 1: __A = pixel_values[:, 0, :, :].unsqueeze(1 ) __A = model(lowerCamelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __A = torch.Size([1, 3, 5_12, 5_12] ) __A = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __A = torch.Size([1, 3, 10_24, 10_24] ) __A = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __A = torch.Size([1, 3, 10_24, 10_24] ) __A = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __A = torch.Size([1, 3, 5_12, 5_12] ) __A = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __A = torch.Size([1, 3, 10_24, 10_24] ) __A = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase , atol=1e-3 ) print('''Looks ok!''' ) __A = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( 
'''swin2SR-compressed-sr-x4-48''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } __A = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowerCamelCase ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": snake_case__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') snake_case__ : str = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
250
0
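# Illustrative aside: the fused-qkv splitting performed by the checkpoint
# conversion sample above. A (3 * dim, dim) weight is sliced into
# query/key/value blocks; the shapes here are toy values.
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)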
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowercase : List[Any] = logging.get_logger(__name__) __lowercase : str = { 'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json', 'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json', 'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json', 'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "mobilenet_v2" def __init__( self , __a=3 , __a=224 , __a=1.0 , __a=8 , __a=8 , __a=6 , __a=32 , __a=True , __a=True , __a="relu6" , __a=True , __a=0.8 , __a=0.02 , __a=0.001 , __a=255 , **__a , ): '''simple docstring''' super().__init__(**__a ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) __a : Any = num_channels __a : Dict = image_size __a : Optional[Any] = depth_multiplier __a : List[str] = depth_divisible_by __a : List[str] = min_depth __a : Any = expand_ratio __a : Optional[Any] = output_stride __a : str = first_layer_is_expansion __a : Optional[Any] = finegrained_output __a : Optional[Any] = hidden_act __a : List[Any] = tf_padding __a : Optional[int] = classifier_dropout_prob __a : Union[str, Any] = initializer_range __a : int = layer_norm_eps __a : str = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase_ ): A_ = version.parse("1.11" ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return 1E-4
27
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = int(number**0.5 ) return number == sq * sq def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): __a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __a : int = x_den * y_den * z_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ): __a : set = set() __a : int __a : Fraction = Fraction(0 ) __a : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __a : Union[str, Any] = x_num * y_den + x_den * y_num __a : Optional[Any] = x_den * y_den __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : Optional[int] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __a : Union[str, Any] = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[Any] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 __a : int = x_num * y_num __a : Optional[Any] = x_den * y_num + x_num * y_den __a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : Any = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 __a : List[Any] = x_num * x_num * y_num * y_num __a : List[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): __a : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) ) __a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __a : List[str] = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
27
1
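# Illustrative aside: the fraction bookkeeping behind add_three in the
# Project Euler sample above, with the result reduced by the gcd. The input
# values are arbitrary.
from math import gcd

x_num, x_den, y_num, y_den, z_num, z_den = 1, 2, 1, 3, 1, 6
top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top, bottom)
assert (top // hcf, bottom // hcf) == (1, 1)  # 1/2 + 1/3 + 1/6 == 1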
'''simple docstring'''


class _lowercase:
    def __init__(self: str, UpperCamelCase__: int):
        lowerCamelCase__: int = size
        lowerCamelCase__: Optional[Any] = [0] * size
        lowerCamelCase__: str = [0] * size

    @staticmethod
    def lowerCamelCase_(UpperCamelCase__: int):
        return index | (index + 1)

    @staticmethod
    def lowerCamelCase_(UpperCamelCase__: int):
        return (index & (index + 1)) - 1

    def lowerCamelCase_(self: Optional[int], UpperCamelCase__: int, UpperCamelCase__: int):
        lowerCamelCase__: int = value
        while index < self.size:
            lowerCamelCase__: Union[str, Any] = self.get_prev(UpperCamelCase__) + 1
            if current_left_border == index:
                lowerCamelCase__: Dict = value
            else:
                lowerCamelCase__: List[Any] = max(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
            lowerCamelCase__: Optional[Any] = self.get_next(UpperCamelCase__)

    def lowerCamelCase_(self: int, UpperCamelCase__: int, UpperCamelCase__: int):
        right -= 1  # Because of right is exclusive
        lowerCamelCase__: Union[str, Any] = 0
        while left <= right:
            lowerCamelCase__: Any = self.get_prev(UpperCamelCase__)
            if left <= current_left:
                lowerCamelCase__: Union[str, Any] = max(UpperCamelCase__, self.tree[right])
                lowerCamelCase__: Tuple = current_left
            else:
                lowerCamelCase__: Any = max(UpperCamelCase__, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
359
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
129
0
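# Illustrative aside: using haversine_distance from the sample above. The
# coordinates are rough and for demonstration only.
sf = (37.7749, -122.4194)  # San Francisco (approx.)
la = (34.0522, -118.2437)  # Los Angeles (approx.)
print(f"SF -> LA ~ {haversine_distance(*sf, *la) / 1000:.0f} km")  # roughly 560 km
assert haversine_distance(*sf, *sf) == 0.0  # zero distance from a point to itself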
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowercase__ : List[str] = TypeVar('''KEY''') lowercase__ : int = TypeVar('''VAL''') @dataclass(frozen=__snake_case , slots=__snake_case ) class SCREAMING_SNAKE_CASE (Generic[KEY, VAL] ): lowerCAmelCase = 42 lowerCAmelCase = 42 class SCREAMING_SNAKE_CASE (_Item ): def __init__( self): '''simple docstring''' super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def __bool__( self): '''simple docstring''' return False lowercase__ : Optional[int] = _DeletedItem() class SCREAMING_SNAKE_CASE (MutableMapping[KEY, VAL] ): def __init__( self , _UpperCAmelCase = 8 , _UpperCAmelCase = 0.75): '''simple docstring''' __A : Dict = initial_block_size __A : Optional[Any] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __A : Union[str, Any] = capacity_factor __A : Optional[Any] = 0 def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return hash(__SCREAMING_SNAKE_CASE) % len(self._buckets) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' return (ind + 1) % len(self._buckets) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' __A : Tuple = self._buckets[ind] if not stored: __A : Optional[Any] = _Item(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) self._len += 1 return True elif stored.key == key: __A : Optional[int] = _Item(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return True else: return False def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = len(self._buckets) * self._capacity_factor return len(self) >= int(__SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' if len(self._buckets) <= self._initial_block_size: return False __A : int = len(self._buckets) * self._capacity_factor / 2 return len(self) < limit def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[str] = self._buckets __A : Dict = [None] * new_size __A : Optional[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self._resize(len(self._buckets) * 2) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' self._resize(len(self._buckets) // 2) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : List[Any] = self._get_bucket_index(__SCREAMING_SNAKE_CASE) for _ in range(len(self._buckets)): yield ind __A : List[str] = self._get_next_ind(__SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' for ind in self._iterate_buckets(__SCREAMING_SNAKE_CASE): if self._try_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): break def __setitem__( self , _UpperCAmelCase , _UpperCAmelCase): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def __delitem__( self , _UpperCAmelCase): '''simple docstring''' for ind in self._iterate_buckets(__SCREAMING_SNAKE_CASE): __A : int = self._buckets[ind] if item is None: raise KeyError(__SCREAMING_SNAKE_CASE) if item is _deleted: continue if item.key == key: __A : Optional[int] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , _UpperCAmelCase): '''simple docstring''' for ind in self._iterate_buckets(__SCREAMING_SNAKE_CASE): __A : Dict = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__SCREAMING_SNAKE_CASE) def __len__( self): '''simple docstring''' return self._len def __iter__( self): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self): '''simple docstring''' __A : Optional[int] = ' ,'.join( F'{item.key}: {item.val}' for item in self._buckets if item) return F'HashMap({val_string})'
190
"""simple docstring""" def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int: lowerCamelCase_ = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): lowerCamelCase_ = n - k # Calculate C(n,k) for i in range(_lowerCamelCase ): result *= n - i result //= i + 1 return result def lowerCamelCase__ ( _lowerCamelCase : int ) -> int: return binomial_coefficient(2 * node_count , _lowerCamelCase ) // (node_count + 1) def lowerCamelCase__ ( _lowerCamelCase : int ) -> int: if n < 0: raise ValueError('factorial() not defined for negative values' ) lowerCamelCase_ = 1 for i in range(1 , n + 1 ): result *= i return result def lowerCamelCase__ ( _lowerCamelCase : int ) -> int: return catalan_number(_lowerCamelCase ) * factorial(_lowerCamelCase ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : int = int(input('''Enter the number of nodes: ''').strip() or 0) if node_count <= 0: raise ValueError('''We need some nodes to work with.''') print( F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' F'''binary trees and {catalan_number(node_count)} binary search trees.''' )
183
0
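# Illustrative aside: quick checks of the counting helpers above. Three
# nodes give Catalan(3) = 5 binary search trees and 5 * 3! = 30 binary trees.
assert binomial_coefficient(4, 2) == 6
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30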
'''simple docstring'''
from __future__ import annotations

from random import choice


def random_pivot(lst):
    # Choose a random element of the list as the pivot.
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
369
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
0
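# Illustrative aside: the quickselect-style kth_number helper earlier in
# this record. Note k is 1-indexed and elements are assumed distinct.
nums = [7, 2, 9, 4, 1]
assert kth_number(nums, 1) == 1  # smallest
assert kth_number(nums, 3) == 4  # median
assert kth_number(nums, 5) == 9  # largest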
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __a : @staticmethod def UpperCAmelCase__ ( *__magic_name__ : List[str] , **__magic_name__ : Any ) -> str: """simple docstring""" pass def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Image ) -> str: UpperCAmelCase_ : Dict = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Image ) -> Dict: UpperCAmelCase_ : List[str] = np.array(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = npimg.shape return {"hash": hashimage(SCREAMING_SNAKE_CASE__ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __a (unittest.TestCase ): __a : Optional[int] = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a : Optional[Any] = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : str ) -> str: """simple docstring""" UpperCAmelCase_ : int = MaskGenerationPipeline(model=__magic_name__ , image_processor=__magic_name__ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : Optional[Any] ) -> Tuple: """simple docstring""" pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" pass @slow @require_torch def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) UpperCAmelCase_ : Optional[int] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_56 ) # Shortening by hashing UpperCAmelCase_ : Dict = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__magic_name__ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_2_1}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_0_5_3}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_9_6_7}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_9_3}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_9_0_9}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_8_7_9}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': 
(4_80, 6_40)}, '''scores''': 0.9_8_3_4}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_7_1_6}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_6_1_2}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_5_9_9}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_5_5_2}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_5_3_2}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_5_1_6}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_9_9}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_8_3}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_6_4}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_4_0_8}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_3_3_5}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_3_2_6}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9_2_6_2}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8_9_9_9}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8_9_8_6}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8_9_8_4}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8_8_7_3}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = '''facebook/sam-vit-huge''' UpperCAmelCase_ : int = pipeline('''mask-generation''' , model=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_56 ) # Shortening by hashing UpperCAmelCase_ : Union[str, Any] = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__magic_name__ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__magic_name__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_2_1_0}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0_0_5_3}, ] , )
125
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __a : def __init__( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int]=13 , __magic_name__ : str=7 , __magic_name__ : Dict=True , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=99 , __magic_name__ : List[str]=32 , __magic_name__ : int=2 , __magic_name__ : List[str]=4 , __magic_name__ : Tuple=37 , __magic_name__ : Dict="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Optional[int]=5_12 , __magic_name__ : Tuple=16 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Dict=3 , __magic_name__ : str=4 , __magic_name__ : Optional[Any]=None , __magic_name__ : Any=0 , ) -> Any: """simple docstring""" UpperCAmelCase_ : str = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : Optional[Any] = use_input_mask UpperCAmelCase_ : Tuple = use_token_type_ids UpperCAmelCase_ : int = use_labels UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Union[str, Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : Any = num_attention_heads UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : List[str] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : Tuple = num_choices UpperCAmelCase_ : Union[str, Any] = scope UpperCAmelCase_ : Union[str, Any] = projection_dim def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Dict = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py UpperCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Tuple = None if self.use_token_type_ids: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Optional[Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) UpperCAmelCase_ : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : str , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Any ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder(config=__magic_name__ ) UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : int = model(__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Any = model(__magic_name__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : Tuple ) -> int: """simple docstring""" UpperCAmelCase_ : List[str] = TFDPRQuestionEncoder(config=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = TFDPRReader(config=__magic_name__ ) UpperCAmelCase_ : Tuple = model(__magic_name__ , attention_mask=__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Any = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) __a : int = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {} __a : str = False __a : str = False __a : Dict = False __a : Optional[Any] = False __a : Any = False def UpperCAmelCase__ ( self : int ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Optional[int] = TFDPRModelTester(self ) 
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*__magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = TFDPRContextEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = TFDPRQuestionEncoder.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = TFDPRReader.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_tf class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : Optional[int] ) -> str: """simple docstring""" UpperCAmelCase_ : Any = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) UpperCAmelCase_ : Optional[int] = tf.constant( [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP] UpperCAmelCase_ : List[Any] = model(__magic_name__ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. UpperCAmelCase_ : List[str] = tf.constant( [ [ 0.0_3_2_3_6_2_5_3, 0.1_2_7_5_3_3_3_5, 0.1_6_8_1_8_5_0_9, 0.0_0_2_7_9_7_8_6, 0.3_8_9_6_9_3_3, 0.2_4_2_6_4_9_4_5, 0.2_1_7_8_9_7_1, -0.0_2_3_3_5_2_2_7, -0.0_8_4_8_1_9_5_9, -0.1_4_3_2_4_1_1_7, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
125
1
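# Illustrative aside: the hash-shortening trick used by the mask-generation
# pipeline test earlier in this record, which reduces a mask image to a
# 10-character md5 prefix so expected outputs stay readable (the dataset
# sample spells md5 as 'mda').
import hashlib

import numpy as np

img = np.zeros((4, 4), dtype=np.uint8)
digest = hashlib.md5(img.tobytes()).hexdigest()[:10]
assert len(digest) == 10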
'''simple docstring'''
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
code_codestyle: 142
'''simple docstring''' from manim import * class _a ( __lowerCAmelCase ): def _lowercase ( self ) -> Optional[int]: _snake_case = Rectangle(height=0.5 ,width=0.5 ) _snake_case = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("CPU" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(4 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("GPU" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) gpu.move_to([-1, -1, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("Model" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE ) model.move_to([3, -1.0, 0] ) self.add(_SCREAMING_SNAKE_CASE ) _snake_case = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): rect.set_stroke(_SCREAMING_SNAKE_CASE ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) _snake_case = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=_SCREAMING_SNAKE_CASE ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 ) self.add(_SCREAMING_SNAKE_CASE ) cpu_targs.append(_SCREAMING_SNAKE_CASE ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 ) _snake_case = Text("Loaded Checkpoint" ,font_size=24 ) _snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,aligned_edge=_SCREAMING_SNAKE_CASE ,buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) _snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _snake_case = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) self.add(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) _snake_case = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,) blue_text.next_to(_SCREAMING_SNAKE_CASE ,DOWN * 2.4 ,aligned_edge=key_text.get_left() ) _snake_case = MarkupText( f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,font_size=24 ,) step_a.move_to([2, 2, 0] ) self.play(Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) ) self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=1 ) 
,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) ) _snake_case = [] _snake_case = [] for i, rect in enumerate(_SCREAMING_SNAKE_CASE ): _snake_case = fill.copy().set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 ) target.move_to(_SCREAMING_SNAKE_CASE ) first_animations.append(GrowFromCenter(_SCREAMING_SNAKE_CASE ,run_time=1 ) ) _snake_case = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) ) self.play(*_SCREAMING_SNAKE_CASE ) self.play(*_SCREAMING_SNAKE_CASE ) self.wait()
style_context_codestyle: 142
label: 1
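The docstring checker in the record above hinges on one regex that pairs a checkpoint name with its Hub link. A standalone sketch of that check, with a made-up docstring and the pattern written as a raw string (the snippet compiles it without the r prefix, which Python only tolerates with a deprecation warning):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = (
    "Instantiating a configuration with the defaults will yield a configuration "
    "similar to [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
)

for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    # The check passes only when the link is exactly the Hub URL built from the name.
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"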
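The Manim scene in the same record builds every element from one pattern: copy a base rectangle, arrange the copies into a strip, and group them with a label. A pared-down, hedged sketch of that pattern (the scene name and label text are illustrative):

from manim import DOWN, RIGHT, Group, Rectangle, Scene, Text, VGroup

class MemoryBlock(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        # Six touching cells in a row, exactly as the CPU/GPU/Model groups are built.
        cells = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24)
        self.add(Group(cells, label).arrange(DOWN, buff=0.5))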
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCAmelCase ( _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : str = None , _lowerCAmelCase : str = None , ): """simple docstring""" if config_name_or_path is None: UpperCAmelCase__ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: UpperCAmelCase__ = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCAmelCase__ = question_encoder_name_or_path UpperCAmelCase__ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. UpperCAmelCase__ = RagConfig.from_pretrained(_lowerCAmelCase ) UpperCAmelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCAmelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) UpperCAmelCase__ = gen_config UpperCAmelCase__ = question_encoder_config UpperCAmelCase__ = model_class.from_pretrained_question_encoder_generator( _lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase ) rag_model.save_pretrained(_lowerCAmelCase ) # Sanity check. model_class.from_pretrained(_lowerCAmelCase ) # Save tokenizers. UpperCAmelCase__ = AutoTokenizer.from_pretrained(_lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(_lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token"], required=True, type=str, help="RAG model type: rag_sequence, rag_token", ) parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.") parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier") parser.add_argument( "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier" ) parser.add_argument( "--generator_tokenizer_name_or_path", type=str, help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``", ) parser.add_argument( "--question_encoder_tokenizer_name_or_path", type=str, help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``", ) parser.add_argument( "--config_name_or_path", type=str, help=( "Identifier of the model config to use, if not provided, resolves to a base config for a given" " ``model_type``" ), ) _lowerCAmelCase : Union[str, Any] = parser.parse_args() _lowerCAmelCase : str = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
code_codestyle: 169
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : Tuple = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def lowerCAmelCase ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ): """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: UpperCAmelCase__ = TOKENIZER_CLASSES else: UpperCAmelCase__ = {tokenizer_name: getattr(_lowerCAmelCase , tokenizer_name + "Fast" )} logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: UpperCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name] UpperCAmelCase__ = True if checkpoint_name is None: UpperCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() ) else: UpperCAmelCase__ = [checkpoint_name] logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer UpperCAmelCase__ = tokenizer_class.from_pretrained(_lowerCAmelCase , force_download=_lowerCAmelCase ) # Save fast tokenizer logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: UpperCAmelCase__ , UpperCAmelCase__ = checkpoint.split("/" ) UpperCAmelCase__ = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) elif add_prefix: UpperCAmelCase__ = checkpoint UpperCAmelCase__ = dump_path else: UpperCAmelCase__ = None UpperCAmelCase__ = dump_path logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: UpperCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] UpperCAmelCase__ = file_path.split(_lowerCAmelCase )[-1][0] if next_char == "/": UpperCAmelCase__ = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase__ = None logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) UpperCAmelCase__ = tokenizer.save_pretrained( _lowerCAmelCase , legacy_format=_lowerCAmelCase , filename_prefix=_lowerCAmelCase ) logger.info(F'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(_lowerCAmelCase ) logger.info(F'''=> removing {file_name}''' ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) _lowerCAmelCase : str = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
style_context_codestyle: 169
label: 1
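The RAG consolidation script above is driven by argparse, but the function it ends by calling (named consolidate at the call site) can be invoked directly. A hedged sketch; pairing the rag_token model type with facebook/bart-large as the generator is an assumption, not something the snippet fixes:

from pathlib import Path

dest_dir = Path("rag-token-consolidated")  # illustrative output directory
dest_dir.mkdir(exist_ok=True)
# Positional arguments follow the script's signature: model type, generator,
# question encoder, destination; config and tokenizer paths default to None.
consolidate(
    "rag_token",
    "facebook/bart-large",
    "facebook/dpr-question_encoder-single-nq-base",
    dest_dir,
)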
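Its companion in the same record converts slow tokenizer checkpoints to fast ones; the call at the bottom of that script (convert_slow_checkpoint_to_fast at the call site) maps onto a direct invocation like this, with an illustrative dump path:

# Converts the bert-base-uncased checkpoint of the slow BertTokenizer into a
# fast tokenizer.json dump; passing None as the checkpoint would instead
# convert every checkpoint the class knows about.
convert_slow_checkpoint_to_fast(
    "BertTokenizer",            # must be a key of TOKENIZER_CLASSES
    "bert-base-uncased",
    "dumped_fast_tokenizers/",  # illustrative output directory
    False,                      # force_download
)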
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = {} def _UpperCamelCase ( self ,A ,A ,A=1 ): if self.graph.get(A ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: UpperCAmelCase = [[w, v]] if not self.graph.get(A ): UpperCAmelCase = [] def _UpperCamelCase ( self ): return list(self.graph ) def _UpperCamelCase ( self ,A ,A ): if self.graph.get(A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A ) def _UpperCamelCase ( self ,A=-2 ,A=-1 ): if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return visited def _UpperCamelCase ( self ,A=-1 ): if c == -1: UpperCAmelCase = floor(random() * 10_000 ) + 10 for i in range(A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(A ,A ,1 ) def _UpperCamelCase ( self ,A=-2 ): UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] d.append(A ) visited.append(A ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCamelCase ( self ,A ): UpperCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCamelCase ( self ,A ): return len(self.graph[u] ) def _UpperCamelCase ( self ,A=-2 ): UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = s UpperCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return sorted_nodes def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) 
visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = False indirect_parents.append(A ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return list(A ) def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = False indirect_parents.append(A ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return False def _UpperCamelCase ( self ,A=-2 ,A=-1 ): UpperCAmelCase = time() self.dfs(A ,A ) UpperCAmelCase = time() return end - begin def _UpperCamelCase ( self ,A=-2 ): UpperCAmelCase = time() self.bfs(A ) UpperCAmelCase = time() return end - begin class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = {} def _UpperCamelCase ( self ,A ,A ,A=1 ): # check if the u exists if self.graph.get(A ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist UpperCAmelCase = [[w, v]] # add the other way if self.graph.get(A ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist UpperCAmelCase = [[w, u]] def _UpperCamelCase ( self ,A ,A ): if self.graph.get(A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(A ) # the other way round if self.graph.get(A ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(A ) def _UpperCamelCase ( self ,A=-2 ,A=-1 ): if s == d: return [] UpperCAmelCase = [] UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return visited def _UpperCamelCase ( self ,A=-1 ): if c == -1: UpperCAmelCase = floor(random() * 10_000 ) + 10 for i in range(A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(A ,A ,1 ) def _UpperCamelCase ( self ,A=-2 ): UpperCAmelCase = deque() UpperCAmelCase = [] if s == -2: UpperCAmelCase = list(self.graph )[0] 
d.append(A ) visited.append(A ) while d: UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCamelCase ( self ,A ): return len(self.graph[u] ) def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = False indirect_parents.append(A ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return list(A ) def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = [] UpperCAmelCase = list(self.graph )[0] stack.append(A ) visited.append(A ) UpperCAmelCase = -2 UpperCAmelCase = [] UpperCAmelCase = s UpperCAmelCase = False UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): UpperCAmelCase = len(A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() UpperCAmelCase = True if len(A ) != 0: UpperCAmelCase = stack[len(A ) - 1] else: UpperCAmelCase = False indirect_parents.append(A ) UpperCAmelCase = s UpperCAmelCase = ss # check if se have reached the starting point if len(A ) == 0: return False def _UpperCamelCase ( self ): return list(self.graph ) def _UpperCamelCase ( self ,A=-2 ,A=-1 ): UpperCAmelCase = time() self.dfs(A ,A ) UpperCAmelCase = time() return end - begin def _UpperCamelCase ( self ,A=-2 ): UpperCAmelCase = time() self.bfs(A ) UpperCAmelCase = time() return end - begin
code_codestyle: 358
"""simple docstring""" from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image _UpperCamelCase = ["""text""", """image""", """audio"""] def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = [] for input_type in input_types: if input_type == "text": inputs.append("""Text input""" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(_snake_case , _snake_case ): inputs.append(create_inputs(_snake_case ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = [] for output in outputs: if isinstance(_snake_case , (str, AgentText) ): output_types.append("""text""" ) elif isinstance(_snake_case , (Image.Image, AgentImage) ): output_types.append("""image""" ) elif isinstance(_snake_case , (torch.Tensor, AgentAudio) ): output_types.append("""audio""" ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class lowerCamelCase__ : def _UpperCamelCase ( self ): self.assertTrue(hasattr(self.tool ,"""inputs""" ) ) self.assertTrue(hasattr(self.tool ,"""outputs""" ) ) UpperCAmelCase = self.tool.inputs for _input in inputs: if isinstance(_input ,A ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) UpperCAmelCase = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def _UpperCamelCase ( self ): UpperCAmelCase = create_inputs(self.tool.inputs ) UpperCAmelCase = self.tool(*A ) # There is a single output if len(self.tool.outputs ) == 1: UpperCAmelCase = [outputs] self.assertListEqual(output_types(A ) ,self.tool.outputs ) def _UpperCamelCase ( self ): self.assertTrue(hasattr(self.tool ,"""description""" ) ) self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) ) self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) ) def _UpperCamelCase ( self ): UpperCAmelCase = create_inputs(self.tool.inputs ) UpperCAmelCase = self.tool(*A ) if not isinstance(A ,A ): UpperCAmelCase = [outputs] self.assertEqual(len(A ) ,len(self.tool.outputs ) ) for output, output_type in zip(A ,self.tool.outputs ): UpperCAmelCase = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(A ,A ) ) def _UpperCamelCase ( self ): UpperCAmelCase = create_inputs(self.tool.inputs ) UpperCAmelCase = [] for _input, input_type in zip(A ,self.tool.inputs ): if isinstance(A ,A ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error UpperCAmelCase = self.tool(*A ) if not isinstance(A ,A ): UpperCAmelCase = [outputs] self.assertEqual(len(A ) ,len(self.tool.outputs ) )
style_context_codestyle: 234
label: 0
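The obfuscation in the record above collapses every method of the two graph classes to the same name, so the copy as printed cannot run; in the canonical version of this code the methods are add_pair, all_nodes, dfs, bfs, has_cycle, and so on. A usage sketch for the first (directed) class under that assumption:

g = DirectedGraph()           # class name assumed; the snippet's is masked
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)              # closes a 0 -> 1 -> 2 -> 0 cycle
print(g.all_nodes())          # [0, 1, 2]
print(g.dfs(0, 2))            # a DFS path from node 0 to node 2: [0, 1, 2]
print(g.bfs(0))               # BFS visit order starting at node 0
print(g.has_cycle())          # True, thanks to the back edge 2 -> 0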
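The tool-test half of the record pairs two helpers that the mixin references by name: create_inputs fabricates one dummy value per declared input type, and output_types maps outputs back to type strings. A small round trip (torch must be available for the audio tensor):

inputs = create_inputs(["text", "audio"])   # -> ["Text input", torch.ones(3000)]
assert output_types(inputs) == ["text", "audio"]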
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowercase__ ( unittest.TestCase ): a_ =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) lowerCAmelCase__ = VideoClassificationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase , top_k=2 ) lowerCAmelCase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> List[str]: '''simple docstring''' for example in examples: lowerCAmelCase__ = video_classifier(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"score": ANY(__UpperCAmelCase ), "label": ANY(__UpperCAmelCase )}, {"score": ANY(__UpperCAmelCase ), "label": ANY(__UpperCAmelCase )}, ] , ) @require_torch def UpperCAmelCase ( self )-> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowerCAmelCase__ = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} ) lowerCAmelCase__ = pipeline( "video-classification" , model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , frame_sampling_rate=4 ) lowerCAmelCase__ = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) lowerCAmelCase__ = video_classifier(__UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}] , ) lowerCAmelCase__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ] , ) @require_tf def UpperCAmelCase ( self )-> Any: '''simple docstring''' pass
code_codestyle: 340
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple: """simple docstring""" lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase__ = "" else: lowerCAmelCase__ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _a ( UpperCamelCase_ : Dict ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = dct.pop(UpperCamelCase_ ) lowerCAmelCase__ = val def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ViTMSNConfig() lowerCAmelCase__ = 1_000 lowerCAmelCase__ = "datasets/huggingface/label-files" lowerCAmelCase__ = "imagenet-1k-id2label.json" lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) ) lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCAmelCase__ = 384 lowerCAmelCase__ = 1_536 lowerCAmelCase__ = 6 elif "l16" in checkpoint_url: lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 elif "b4" in checkpoint_url: lowerCAmelCase__ = 4 elif "l7" in checkpoint_url: lowerCAmelCase__ = 7 lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ ) lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"] lowerCAmelCase__ = ViTImageProcessor(size=config.image_size ) remove_projection_head(UpperCamelCase_ ) lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ ) for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) lowerCAmelCase__ = ViTImageProcessor( size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ ) 
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) lowerCAmelCase__ = model(**UpperCamelCase_ ) lowerCAmelCase__ = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
style_context_codestyle: 340
label: 1
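The pipeline test in the record above reduces to a few lines of direct usage; the model and repo names are copied from the test, and the decord video backend it requires must be installed:

from huggingface_hub import hf_hub_download
from transformers import pipeline

video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
print(classifier(video_path, top_k=2))  # two {"score": ..., "label": ...} dicts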
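The ViT-MSN conversion script in the same record is likewise argparse-driven; its entry point (convert_vit_msn_checkpoint at the call site) takes a checkpoint URL and a dump folder. A direct call using the script's own default URL and an illustrative folder name:

convert_vit_msn_checkpoint(
    "https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",  # the argparse default (ViT-S/16)
    "vit-msn-dump",  # illustrative output directory
)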
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase : List[str] = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: __UpperCamelCase : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0) if decoder_attention_mask is None: __UpperCamelCase : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0) if head_mask is None: __UpperCamelCase : Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: __UpperCamelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: __UpperCamelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase__ : '''simple docstring''' def __init__( self :List[str] , a :List[str] , a :Optional[Any]=1_3 , a :List[Any]=7 , a :Optional[Any]=True , a :Any=False , a :Union[str, Any]=9_9 , a :List[str]=1_6 , a :Tuple=2 , a :Dict=4 , a :List[Any]=4 , a :Optional[int]="gelu" , a :str=0.1 , a :Union[str, Any]=0.1 , a :List[Any]=3_2 , a :Optional[Any]=2 , a :Optional[Any]=1 , a :Union[str, Any]=0 , a :Tuple=0.02 , ) -> Optional[Any]: __UpperCamelCase : str = parent __UpperCamelCase : Any = batch_size __UpperCamelCase : List[Any] = seq_length __UpperCamelCase : List[str] = is_training __UpperCamelCase : Any = use_labels __UpperCamelCase : Optional[int] = vocab_size __UpperCamelCase : Optional[Any] = hidden_size __UpperCamelCase : List[Any] = num_hidden_layers __UpperCamelCase : int = num_attention_heads __UpperCamelCase : int = intermediate_size __UpperCamelCase : List[Any] = hidden_act __UpperCamelCase : int = hidden_dropout_prob __UpperCamelCase : Any = attention_probs_dropout_prob __UpperCamelCase : List[str] = max_position_embeddings __UpperCamelCase : Optional[int] = eos_token_id __UpperCamelCase : Any = pad_token_id __UpperCamelCase : Any = bos_token_id __UpperCamelCase : Optional[int] = initializer_range def _lowerCamelCase ( self :Dict ) -> List[str]: __UpperCamelCase : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __UpperCamelCase : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __UpperCamelCase : Union[str, 
Any] = shift_tokens_right(a , 1 , 2 ) __UpperCamelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=a , ) __UpperCamelCase : int = prepare_blenderbot_inputs_dict(a , a , a ) return config, inputs_dict def _lowerCamelCase ( self :Dict ) -> str: __UpperCamelCase : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _lowerCamelCase ( self :Tuple , a :Any , a :Union[str, Any] , a :Any ) -> int: __UpperCamelCase : Union[str, Any] = 2_0 __UpperCamelCase : Dict = model_class_name(a ) __UpperCamelCase : Optional[int] = model.encode(inputs_dict["input_ids"] ) __UpperCamelCase : Optional[int] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __UpperCamelCase : Dict = model.init_cache(decoder_input_ids.shape[0] , a , a ) __UpperCamelCase : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __UpperCamelCase : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase : int = model.decode( decoder_input_ids[:, :-1] , a , decoder_attention_mask=a , past_key_values=a , decoder_position_ids=a , ) __UpperCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __UpperCamelCase : Dict = model.decode( decoder_input_ids[:, -1:] , a , decoder_attention_mask=a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a , ) __UpperCamelCase : Optional[Any] = model.decode(a , a ) __UpperCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' ) def _lowerCamelCase ( self :Dict , a :Optional[int] , a :Dict , a :Union[str, Any] ) -> List[str]: __UpperCamelCase : Union[str, Any] = 2_0 __UpperCamelCase : List[Any] = model_class_name(a ) __UpperCamelCase : List[Any] = model.encode(inputs_dict["input_ids"] ) __UpperCamelCase : Optional[int] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __UpperCamelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase : Any = model.init_cache(decoder_input_ids.shape[0] , a , a ) __UpperCamelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , a , decoder_attention_mask=a , past_key_values=a , decoder_position_ids=a , ) __UpperCamelCase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __UpperCamelCase : List[Any] = model.decode( decoder_input_ids[:, -1:] , a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a , 
decoder_position_ids=a , ) __UpperCamelCase : int = model.decode(a , a , decoder_attention_mask=a ) __UpperCamelCase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' ) @require_flax class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' _A = 9_9 def _lowerCamelCase ( self :int ) -> str: __UpperCamelCase : Any = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) __UpperCamelCase : Dict = input_ids.shape[0] __UpperCamelCase : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _lowerCamelCase ( self :int ) -> int: __UpperCamelCase : Tuple = self._get_config_and_data() __UpperCamelCase : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(a ) __UpperCamelCase : Dict = lm_model(input_ids=a ) __UpperCamelCase : str = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , a ) def _lowerCamelCase ( self :List[str] ) -> Union[str, Any]: __UpperCamelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) __UpperCamelCase : List[str] = FlaxBlenderbotForConditionalGeneration(a ) __UpperCamelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) __UpperCamelCase : Tuple = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) __UpperCamelCase : Dict = lm_model(input_ids=a , decoder_input_ids=a ) __UpperCamelCase : List[str] = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , a ) def _lowerCamelCase ( self :Dict ) -> Dict: __UpperCamelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) __UpperCamelCase : List[Any] = shift_tokens_right(a , 1 , 2 ) __UpperCamelCase : int = np.equal(a , 1 ).astype(np.floataa ).sum() __UpperCamelCase : Optional[Any] = np.equal(a , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(a , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase__ ( __lowercase , unittest.TestCase , __lowercase): '''simple docstring''' _A = True _A = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _A = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _lowerCamelCase ( self :Tuple ) -> int: __UpperCamelCase : Any = FlaxBlenderbotModelTester(self ) def _lowerCamelCase ( self :str ) -> int: __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward(a , a , a ) def _lowerCamelCase ( self :List[Any] ) -> List[Any]: __UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(a , a , a ) def _lowerCamelCase ( self :str ) -> Union[str, Any]: __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase : List[str] = self._prepare_for_class(a , a ) __UpperCamelCase : Dict = model_class(a ) @jax.jit def encode_jitted(a :List[Any] , a :Any=None , **a :Dict ): return model.encode(input_ids=a , attention_mask=a ) with self.subTest("JIT Enabled" ): __UpperCamelCase : List[str] = encode_jitted(**a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __UpperCamelCase : List[str] = encode_jitted(**a ).to_tuple() self.assertEqual(len(a ) , len(a ) ) for jitted_output, output in zip(a , a ): self.assertEqual(jitted_output.shape , output.shape ) def _lowerCamelCase ( self :List[str] ) -> List[Any]: __UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase : List[str] = model_class(a ) __UpperCamelCase : Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __UpperCamelCase : List[str] = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(a :Tuple , a :Optional[Any] , a :Optional[int] ): return model.decode( decoder_input_ids=a , decoder_attention_mask=a , encoder_outputs=a , ) with self.subTest("JIT Enabled" ): __UpperCamelCase : Optional[Any] = decode_jitted(**a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __UpperCamelCase : Any = decode_jitted(**a ).to_tuple() self.assertEqual(len(a ) , len(a ) ) for jitted_output, output in zip(a , a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowerCamelCase ( self :Any ) -> int: for model_class_name in self.all_model_classes: __UpperCamelCase : int = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __UpperCamelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id __UpperCamelCase : Optional[int] = model(a ) self.assertIsNotNone(a ) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." ) @slow def _lowerCamelCase ( self :int ) -> Dict: __UpperCamelCase : Dict = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5} __UpperCamelCase : List[Any] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} __UpperCamelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=a ) __UpperCamelCase : Tuple = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" ) __UpperCamelCase : Optional[int] = ["Sam"] __UpperCamelCase : int = tokenizer(a , return_tensors="jax" ) __UpperCamelCase : Tuple = model.generate(**a , **a ) __UpperCamelCase : int = "Sam is a great name. It means \"sun\" in Gaelic." __UpperCamelCase : str = tokenizer.batch_decode(a , **a ) assert generated_txt[0].strip() == tgt_text
code_codestyle: 361
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int]) -> Dict: '''simple docstring''' return EnvironmentCommand() def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Dict: '''simple docstring''' return EnvironmentCommand(args.accelerate_config_file) class lowerCamelCase__ ( __lowercase): '''simple docstring''' @staticmethod def _lowerCamelCase ( a :ArgumentParser ) -> str: __UpperCamelCase : List[Any] = parser.add_parser("env" ) download_parser.set_defaults(func=a ) download_parser.add_argument( "--accelerate-config_file" , default=a , help="The accelerate config file to use for the default values in the launching script." , ) download_parser.set_defaults(func=a ) def __init__( self :Tuple , a :Dict , *a :List[str] ) -> None: __UpperCamelCase : List[str] = accelerate_config_file def _lowerCamelCase ( self :int ) -> Dict: __UpperCamelCase : int = "not installed" if is_safetensors_available(): import safetensors __UpperCamelCase : List[str] = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors __UpperCamelCase : Optional[Any] = f'{safetensors.__version__} but is ignored because of PyTorch version too old.' __UpperCamelCase : List[str] = "not installed" __UpperCamelCase : List[str] = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __UpperCamelCase : Tuple = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(a ): __UpperCamelCase : Dict = load_config_from_file(self._accelerate_config_file ).to_dict() __UpperCamelCase : int = ( "\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(a , a ) else f'\t{accelerate_config}' ) __UpperCamelCase : List[Any] = "not installed" __UpperCamelCase : Dict = "NA" if is_torch_available(): import torch __UpperCamelCase : Optional[int] = torch.__version__ __UpperCamelCase : Optional[Any] = torch.cuda.is_available() __UpperCamelCase : Dict = "not installed" __UpperCamelCase : str = "NA" if is_tf_available(): import tensorflow as tf __UpperCamelCase : Optional[Any] = tf.__version__ try: # deprecated in v2.1 __UpperCamelCase : Dict = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __UpperCamelCase : Optional[Any] = bool(tf.config.list_physical_devices("GPU" ) ) __UpperCamelCase : List[Any] = "not installed" __UpperCamelCase : Any = "not installed" __UpperCamelCase : Tuple = "not installed" __UpperCamelCase : Optional[int] = "NA" if is_flax_available(): import flax import jax import jaxlib __UpperCamelCase : int = flax.__version__ __UpperCamelCase : Any = jax.__version__ __UpperCamelCase : Optional[int] = jaxlib.__version__ __UpperCamelCase : List[Any] = jax.lib.xla_bridge.get_backend().platform __UpperCamelCase : Optional[Any] = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f'{safetensors_version}', "Accelerate version": f'{accelerate_version}', "Accelerate config": f'{accelerate_config_str}', "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})', "Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})', "Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})', "Jax version": f'{jax_version}', "JaxLib version": f'{jaxlib_version}', "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(a ) ) return info @staticmethod def _lowerCamelCase ( a :str ) -> int: return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
style_context_codestyle: 151
label: 0
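The Flax Blenderbot record ends with a slow 3B generation test; stripped of the test harness it amounts to the following (the test itself skips on CPU, so a GPU or TPU JAX install is assumed):

from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration

model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

inputs = tokenizer(["Sam"], return_tensors="jax")
outputs = model.generate(**inputs, num_beams=1, early_stopping=True, min_length=15, max_length=25)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True, clean_up_tokenization_spaces=True))
# The test expects: Sam is a great name. It means "sun" in Gaelic.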
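Its style_context implements the transformers-cli env command; the class can also be driven directly. The module path and the run method name are assumptions here, since the snippet masks its method names:

from transformers.commands.env import EnvironmentCommand  # assumed module path

# Rough equivalent of running `transformers-cli env` from a shell; the single
# positional argument is the optional accelerate config file.
EnvironmentCommand(None).run()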
from manim import * class a__ ( UpperCAmelCase ): """simple docstring""" def _lowercase ( self : Tuple ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : int = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[Any] = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = Text("""CPU""" , font_size=2_4 ) SCREAMING_SNAKE_CASE : Optional[int] = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE : Any = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""GPU""" , font_size=2_4 ) SCREAMING_SNAKE_CASE : Tuple = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : Tuple = Text("""Model""" , font_size=2_4 ) SCREAMING_SNAKE_CASE : Tuple = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for i, rect in enumerate(UpperCAmelCase__ ): rect.set_stroke(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=UpperCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCAmelCase__ , buff=0.0 ) self.add(UpperCAmelCase__ ) model_cpu_arr.append(UpperCAmelCase__ ) self.add(*UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : Any = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : List[str] = Text("""Loaded Checkpoint""" , font_size=2_4 ) SCREAMING_SNAKE_CASE : int = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Dict = [] for i, rect in enumerate(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Dict = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 ) target.move_to(UpperCAmelCase__ ) ckpt_arr.append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Any = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) 
else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(UpperCAmelCase__ ) self.add(*UpperCAmelCase__ , *UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE : List[str] = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCAmelCase__ , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[str] = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , ) blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = MarkupText( f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) SCREAMING_SNAKE_CASE : Union[str, Any] = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[str] = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE : List[Any] = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : List[str] = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 ) SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=2_4 ) SCREAMING_SNAKE_CASE : Tuple = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(UpperCAmelCase__ , run_time=3 ) , Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) ) SCREAMING_SNAKE_CASE : int = [] for i, rect in enumerate(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) ) self.play(*UpperCAmelCase__ ) self.play(FadeOut(UpperCAmelCase__ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase__ , run_time=3 ) ) self.play( FadeOut(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ ) , ) self.wait()
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class a__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 
27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE : Dict = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 ) SCREAMING_SNAKE_CASE : str = output[output != -float("""inf""" )] SCREAMING_SNAKE_CASE : Optional[int] = tf.cast( tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @require_tf class a__ ( unittest.TestCase , UpperCAmelCase ): """simple docstring""" if is_tf_available(): UpperCAmelCase__ : Optional[Any] ={ """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def _lowercase ( self : int ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : Tuple = 2 class a__ ( tf.Module ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->str: """simple docstring""" super(UpperCAmelCase__ , self ).__init__() SCREAMING_SNAKE_CASE : Optional[int] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=UpperCAmelCase__ , ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE : Any = [[2, 0], [1_0_2, 1_0_3]] SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE : Dict = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} ) SCREAMING_SNAKE_CASE : Optional[int] = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""] for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ): SCREAMING_SNAKE_CASE : int = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } SCREAMING_SNAKE_CASE : Tuple = serving_func(**UpperCAmelCase__ )["""sequences"""] SCREAMING_SNAKE_CASE : List[str] = 
test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : int = 2 class a__ ( tf.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]: """simple docstring""" super(UpperCAmelCase__ , self ).__init__() SCREAMING_SNAKE_CASE : List[str] = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=UpperCAmelCase__ , ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE : List[Any] = [[2], [1_0_2, 1_0_3]] SCREAMING_SNAKE_CASE : List[Any] = [[1], [1, 1]] SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} ) SCREAMING_SNAKE_CASE : int = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""] for input_row in range(len(UpperCAmelCase__ ) ): SCREAMING_SNAKE_CASE : str = { """input_ids""": tf.constant([dummy_input_ids[input_row]] ), """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ), } SCREAMING_SNAKE_CASE : List[str] = serving_func(**UpperCAmelCase__ )["""sequences"""] SCREAMING_SNAKE_CASE : List[Any] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_tensorflow_text def _lowercase ( self : Optional[Any] ) ->Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCAmelCase__ ) class a__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Optional[Any] ) ->List[str]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE : Any = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , """spiece.model""" ) , """rb""" ).read() ) SCREAMING_SNAKE_CASE : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) def _lowercase ( self : int , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.tokenize(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = text.pad_model_inputs( UpperCAmelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) return self.tokenizer.detokenize(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string 
, name="""inputs""" ) SCREAMING_SNAKE_CASE : str = complete_model(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ ) keras_model.save(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = { """do_sample""": True, """num_beams""": 1, """top_p""": 0.7, """top_k""": 1_0, """temperature""": 0.7, } SCREAMING_SNAKE_CASE : Tuple = 1_4 SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : List[Any] = """Hello, my dog is cute and""" SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ) SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) SCREAMING_SNAKE_CASE : Dict = 6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE : int = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) SCREAMING_SNAKE_CASE : Dict = [6_3_8, 1_9_8] with tf.device(""":/CPU:0""" ): tf.random.set_seed(0 ) SCREAMING_SNAKE_CASE : Dict = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _lowercase ( self : str ) ->List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : List[Any] = """Hugging Face is a technology company based in New York and Paris.""" SCREAMING_SNAKE_CASE : Optional[int] = bart_tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids SCREAMING_SNAKE_CASE : int = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ ).numpy() class a__ ( UpperCAmelCase ): """simple docstring""" def _lowercase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict ) ->List[str]: """simple docstring""" return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" ) SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ , foo="""bar""" ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) class a__ ( bart_model.model.encoder.__class__ ): """simple docstring""" def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]: """simple docstring""" return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) SCREAMING_SNAKE_CASE : Tuple = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE : Tuple = bart_model.generate(UpperCAmelCase__ ).numpy() with self.assertRaises(UpperCAmelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase__ , foo="""bar""" )
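# The unit test above pins exact logit values; as a smaller illustration of
# what top-k / top-p filtering does, here is a sketch on a toy distribution.
# It assumes a transformers version that still exports
# `tf_top_k_top_p_filtering` (the same helper the test imports).
import tensorflow as tf
from transformers import tf_top_k_top_p_filtering

toy_logits = tf.constant([[1.0, 2.0, 3.0, 4.0, 5.0]])
filtered = tf_top_k_top_p_filtering(toy_logits, top_k=2, top_p=0.9)
# Everything outside the top-k / top-p nucleus is replaced by -inf, so a
# subsequent softmax-and-sample step can only pick the surviving tokens.
print(filtered.numpy())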
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _a : '''simple docstring''' def __init__( self, A, A=2, A=True, A=False, A=10, A=3, A=32 * 4, A=32 * 6, A=4, A=32, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : str = use_auxiliary_loss SCREAMING_SNAKE_CASE : Optional[Any] = num_queries SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Optional[Any] = min_size SCREAMING_SNAKE_CASE : Dict = max_size SCREAMING_SNAKE_CASE : List[Any] = num_labels SCREAMING_SNAKE_CASE : List[Any] = mask_feature_size def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A ) SCREAMING_SNAKE_CASE : int = torch.ones([self.batch_size, self.min_size, self.max_size], device=A ) SCREAMING_SNAKE_CASE : List[Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=A ) > 0.5 ).float() SCREAMING_SNAKE_CASE : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels), device=A ) > 0.5).long() SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self ): '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1], ), decoder_config=DetrConfig( decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Union[str, Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = output.encoder_hidden_states SCREAMING_SNAKE_CASE : Dict = output.pixel_decoder_hidden_states SCREAMING_SNAKE_CASE : str = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A ), len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A ), len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A ), config.decoder_config.decoder_layers ) def UpperCamelCase_ ( self, A, A, A, A=False ): '''simple docstring''' with torch.no_grad(): SCREAMING_SNAKE_CASE : int = MaskFormerModel(config=A ) model.to(A ) model.eval() SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=A, pixel_mask=A ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, output_hidden_states=A ) # the 
correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A, A ) def UpperCamelCase_ ( self, A, A, A, A, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = MaskFormerForInstanceSegmentation(config=A ) model.to(A ) model.eval() def comm_check_on_output(A ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = model(pixel_values=A, pixel_mask=A ) SCREAMING_SNAKE_CASE : List[str] = model(A ) comm_check_on_output(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = model( pixel_values=A, pixel_mask=A, mask_labels=A, class_labels=A ) comm_check_on_output(A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape, torch.Size([1] ) ) @require_torch class _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () A : Any = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) A : int = False A : Dict = False A : str = False A : List[Any] = False def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = MaskFormerModelTester(self ) SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A, has_text_modality=A ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(A, **A, output_hidden_states=A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*A ) @unittest.skip(reason='MaskFormer does not use inputs_embeds' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MaskFormer is not a generative model' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip(reason='MaskFormer does not use token embeddings' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='MaskFormer has some layers using 
`add_module` which doesn\'t work well with `nn.DataParallel`' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCamelCase_ ( self ): '''simple docstring''' pass def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Optional[Any] = model_class(A ) SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1], A ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: SCREAMING_SNAKE_CASE : List[str] = MaskFormerModel.from_pretrained(A ) self.assertIsNotNone(A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = (self.model_tester.min_size,) * 2 SCREAMING_SNAKE_CASE : Dict = { """pixel_values""": torch.randn((2, 3, *size), device=A ), """mask_labels""": torch.randn((2, 10, *size), device=A ), """class_labels""": torch.zeros(2, 10, device=A ).long(), } SCREAMING_SNAKE_CASE : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(A ) SCREAMING_SNAKE_CASE : str = model(**A ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(A, **A, output_hidden_states=A ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(A ).to(A ) SCREAMING_SNAKE_CASE : Tuple = model(**A, output_attentions=A ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase_ ( self ): '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss SCREAMING_SNAKE_CASE : Optional[int] = self.all_model_classes[1] SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A ) model.to(A ) model.train() SCREAMING_SNAKE_CASE : str = model(A, mask_labels=A, class_labels=A ).loss loss.backward() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.all_model_classes[1] SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Dict = model_class(A ) model.to(A ) model.train() SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, mask_labels=A, class_labels=A ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE : List[str] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't SCREAMING_SNAKE_CASE : Optional[int] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE : Tuple = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCamelCase_ = 1E-4 def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class _a ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' ) if is_vision_available() else None ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(A ) SCREAMING_SNAKE_CASE : Dict = self.default_image_processor SCREAMING_SNAKE_CASE : Optional[int] = prepare_img() SCREAMING_SNAKE_CASE : Dict = image_processor(A, return_tensors='pt' ).to(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A, (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(**A ) SCREAMING_SNAKE_CASE : Any = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3], A, atol=A ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], A, atol=A ) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3], A, atol=A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(A ) .eval() ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(A, return_tensors='pt' ).to(A ) SCREAMING_SNAKE_CASE : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A, (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(**A ) # masks_queries_logits SCREAMING_SNAKE_CASE : Dict = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) SCREAMING_SNAKE_CASE : Any = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(A ).to(A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], A, atol=A ) ) # class_queries_logits SCREAMING_SNAKE_CASE : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, 
model.config.decoder_config.num_queries, model.config.num_labels + 1) ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], A, atol=A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' ) .to(A ) .eval() ) SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor SCREAMING_SNAKE_CASE : int = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(A, return_tensors='pt' ).to(A ) SCREAMING_SNAKE_CASE : List[str] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A, (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**A ) # masks_queries_logits SCREAMING_SNAKE_CASE : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), ) SCREAMING_SNAKE_CASE : Optional[Any] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(A ).to(A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], A, atol=A ) ) # class_queries_logits SCREAMING_SNAKE_CASE : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) SCREAMING_SNAKE_CASE : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], A, atol=A ) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(A ) .eval() ) SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor SCREAMING_SNAKE_CASE : int = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors='pt', ) SCREAMING_SNAKE_CASE : List[str] = inputs["""pixel_values"""].to(A ) SCREAMING_SNAKE_CASE : Tuple = [el.to(A ) for el in inputs["""mask_labels"""]] SCREAMING_SNAKE_CASE : Union[str, Any] = [el.to(A ) for el in inputs["""class_labels"""]] with torch.no_grad(): SCREAMING_SNAKE_CASE : Any = model(**A ) self.assertTrue(outputs.loss is not None )
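# For orientation, a sketch of the plain inference path these integration
# tests exercise, using the same checkpoint and fixture image as the tests;
# the post-processing call assumes the public `MaskFormerImageProcessor` API.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Resize + argmax the query logits into a per-pixel segmentation map.
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]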
"""Project Euler problem 129: A(n) is the length of the smallest repunit
(1, 11, 111, ...) divisible by n. Find the least n for which A(n) first
exceeds one million."""


def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) is divisible by
    `divisor`, or 0 if no repunit can be (divisor shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k + 1) = 10 * R(k) + 1, tracked modulo `divisor`.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor coprime to 10 with A(divisor) > `limit`.
    Since A(n) <= n, the search can start just below the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
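# Quick sanity checks for the helper above:
#   R(6) = 111111 = 7 * 15873, so A(7) = 6;  R(5) = 11111 = 41 * 271, so A(41) = 5.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5
assert least_divisible_repunit(10) == 0  # 10 shares a factor with 10, so no repunit qualifies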
from __future__ import annotations

import math


def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Evaluate a perfect binary game tree with the minimax rule: maximizer
    and minimizer alternate levels, and leaf values live in `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    # Children of a maximizer node are minimizer nodes, and vice versa.
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
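# Worked check of the recursion on a depth-2 tree with leaves [3, 5, 2, 9]:
# the minimizers pick min(3, 5) = 3 and min(2, 9) = 2, the maximizer picks 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3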
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case__ : a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 a_ = 42 a_ = 42 a_ = 42 a_ = 42 def A ( self : Tuple ) -> Optional[int]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A ( self : List[Any] ) -> Union[str, Any]: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A ( self : Optional[int] ) -> torch.Tensor: UpperCAmelCase_ : Dict = torch.arange(self.height * self.width ) UpperCAmelCase_ : int = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) ) UpperCAmelCase_ : Any = self.get_image_coords() UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A ) UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor: UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 ) UpperCAmelCase_ : Union[str, Any] = self.resolution() UpperCAmelCase_ : int = self.fov() UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 ) UpperCAmelCase_ : List[Any] = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A ) UpperCAmelCase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera: UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : Optional[int] = -z * 4 UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(A , A ) origins.append(A ) xs.append(A ) ys.append(A ) zs.append(A ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
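# Sketch of how the orbit-camera factory at the end might be used. In the
# original shap-e / diffusers source the factory is called
# `create_pan_cameras(size)` and the ray property `camera_rays`; those
# readable names are assumed here in place of the obfuscated identifiers.
cameras = create_pan_cameras(64)  # assumed name: 20 views on a circle, 64x64 px each
rays = cameras.camera_rays        # assumed name: [1, 20 * 64 * 64, 2, 3] (origin, direction) pairs
print(rays.shape)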
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( A__ ,A__ ,A__ ,unittest.TestCase ): _UpperCAmelCase : int = StableDiffusionControlNetImgaImgPipeline _UpperCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} _UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _UpperCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) _UpperCAmelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCamelCase ( self : Dict ) ->List[str]: torch.manual_seed(0 ) lowerCamelCase__ : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCamelCase__ : List[Any] = CLIPTextModel(__A ) lowerCamelCase__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase__ : Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCamelCase ( self : Union[str, Any] , A : Dict , A : Tuple=0 ) ->List[str]: if str(__A ).startswith('''mps''' ): lowerCamelCase__ : Tuple = torch.manual_seed(__A ) else: lowerCamelCase__ : Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) lowerCamelCase__ : List[Any] = 2 lowerCamelCase__ : int = randn_tensor( 
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCamelCase__ : Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCamelCase__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : List[Any] = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCamelCase__ : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCamelCase ( self : Tuple ) ->int: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self : List[Any] ) ->Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def __lowerCamelCase ( self : Tuple ) ->List[str]: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class __SCREAMING_SNAKE_CASE ( A__ ,A__ ,unittest.TestCase ): _UpperCAmelCase : List[str] = StableDiffusionControlNetImgaImgPipeline _UpperCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} _UpperCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _UpperCAmelCase : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCamelCase ( self : Optional[Any] ) ->Optional[int]: torch.manual_seed(0 ) lowerCamelCase__ : Dict = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) def init_weights(A : Dict ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCamelCase__ : List[Any] = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[int] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_0_0_0 , ) lowerCamelCase__ : str = CLIPTextModel(__A ) lowerCamelCase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase__ : Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCamelCase__ : List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCamelCase ( self : List[str] , A : Union[str, Any] , A : Dict=0 ) ->str: if str(__A ).startswith('''mps''' ): lowerCamelCase__ : Optional[Any] = torch.manual_seed(__A ) else: lowerCamelCase__ : List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Optional[int] = [ randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCamelCase__ : int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCamelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : List[Any] = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCamelCase__ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCamelCase ( self : Optional[Any] ) ->Optional[Any]: lowerCamelCase__ : List[str] = self.get_dummy_components() lowerCamelCase__ : Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCamelCase__ : Union[str, Any] = 1_0.0 lowerCamelCase__ : Union[str, Any] = 4 lowerCamelCase__ : Tuple = self.get_dummy_inputs(__A ) lowerCamelCase__ : List[str] = steps lowerCamelCase__ : int = scale lowerCamelCase__ : Union[str, Any] = pipe(**__A )[0] lowerCamelCase__ : Any = self.get_dummy_inputs(__A ) lowerCamelCase__ : str = steps lowerCamelCase__ : str = scale lowerCamelCase__ : Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(__A ) lowerCamelCase__ : Union[str, Any] = steps lowerCamelCase__ : Union[str, Any] = scale lowerCamelCase__ : str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCamelCase__ : List[str] = self.get_dummy_inputs(__A ) lowerCamelCase__ : Optional[int] = steps lowerCamelCase__ : Tuple = scale lowerCamelCase__ : str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def __lowerCamelCase ( self : Optional[int] ) ->Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self : Optional[int] ) ->Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def __lowerCamelCase ( self : Optional[int] ) 
->Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def __lowerCamelCase ( self : Optional[int] ) ->List[str]: lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self : Optional[Any] ) ->int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self : Dict ) ->str: lowerCamelCase__ : Any = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowerCamelCase__ : int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCamelCase__ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase__ : List[Any] = """evil space-punk bird""" lowerCamelCase__ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_1_2, 5_1_2) ) lowerCamelCase__ : int = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_1_2, 5_1_2) ) lowerCamelCase__ : Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type='''np''' , num_inference_steps=5_0 , strength=0.6 , ) lowerCamelCase__ : Tuple = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) lowerCamelCase__ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
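# Outside the test harness, the slow test above boils down to this sketch
# (checkpoints, prompt and image URLs are copied from the test; the pipeline
# class is `StableDiffusionControlNetImg2ImgPipeline` in readable diffusers
# naming).
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()  # requires accelerate

init_image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
canny_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

output = pipe(
    "evil space-punk bird",
    image=init_image,
    control_image=canny_image,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator("cpu").manual_seed(0),
)
output.images[0].save("bird_punk.png")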
from __future__ import annotations

END = "#"  # sentinel marking the end of a stored word


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Walk/extend the nested-dict trie one character at a time and mark
        the terminal node with the END sentinel."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Return all stored suffixes below `prefix`, or [] if the prefix is
        not present."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
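# With the sample words inserted above, the completions for "de" keep only
# the words sharing that prefix; the trailing space comes from rendering the
# END sentinel as " ":
#   autocomplete_using_trie("de") -> ("depart ", "detergent ", "deer ", "deal ")
print(autocomplete_using_trie("dog"))  # ("dog ",)
print(autocomplete_using_trie("x"))    # ()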
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowercase__ : Any = getLogger(__name__) lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict: '''simple docstring''' __UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' ) __UpperCamelCase = str(snake_case ) __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case ) if fpaa: __UpperCamelCase = model.half() __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(snake_case , snake_case ) if prefix is None: __UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ): __UpperCamelCase = [prefix + text for text in examples_chunk] __UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case ) __UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , ) __UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __UpperCamelCase = int(time.time() - start_time ) # seconds __UpperCamelCase = len(snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A_ ( ) -> Tuple: '''simple docstring''' return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A_ ( snake_case : str=True ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' ) parser.add_argument( 
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __UpperCamelCase , __UpperCamelCase = parser.parse_known_args() __UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) __UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __UpperCamelCase = generate_summaries_or_translations( snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , ) if args.reference_path is None: return {} # Compute scores __UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge __UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] __UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )] __UpperCamelCase = score_fn(snake_case , snake_case ) scores.update(snake_case ) if args.dump_args: scores.update(snake_case ) if args.info: __UpperCamelCase = args.info if verbose: print(snake_case ) if args.score_path is not None: json.dump(snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
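# The script is normally driven from the command line (see the usage comment
# above), but because the entry point parses argv itself it can also be
# invoked programmatically. A sketch with hypothetical data paths, assuming
# the entry point is named `run_generate` as the final call suggests:
import sys

sys.argv = [
    "run_eval.py",
    "facebook/bart-large-cnn",
    "cnn_dm/test.source",        # hypothetical input file
    "out/test_generations.txt",  # hypothetical output file
    "--reference_path", "cnn_dm/test.target",
    "--score_path", "out/metrics.json",
    "--bs", "8",
    "--task", "summarization",
]
scores = run_generate(verbose=True)  # e.g. {"rouge1": ..., "n_obs": ..., "runtime": ...}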
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase__ :int = TypeVar('''KEY''') lowerCAmelCase__ :List[str] = TypeVar('''VAL''') @dataclass(frozen=UpperCAmelCase , slots=UpperCAmelCase ) class __a ( Generic[KEY, VAL] ): _a : KEY _a : VAL class __a ( _Item ): def __init__( self ) -> None: """simple docstring""" super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __bool__( self ) -> bool: """simple docstring""" return False lowerCAmelCase__ :Optional[Any] = _DeletedItem() class __a ( MutableMapping[KEY, VAL] ): def __init__( self , _SCREAMING_SNAKE_CASE = 8 , _SCREAMING_SNAKE_CASE = 0.75 ) -> None: """simple docstring""" _UpperCAmelCase = initial_block_size _UpperCAmelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _UpperCAmelCase = capacity_factor _UpperCAmelCase = 0 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return hash(_SCREAMING_SNAKE_CASE ) % len(self._buckets ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return (ind + 1) % len(self._buckets ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" _UpperCAmelCase = self._buckets[ind] if not stored: _UpperCAmelCase = _Item(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self._len += 1 return True elif stored.key == key: _UpperCAmelCase = _Item(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return True else: return False def UpperCAmelCase__ ( self ) -> bool: """simple docstring""" _UpperCAmelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> bool: """simple docstring""" if len(self._buckets ) <= self._initial_block_size: return False _UpperCAmelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" _UpperCAmelCase = self._buckets _UpperCAmelCase = [None] * new_size _UpperCAmelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def UpperCAmelCase__ ( self ) -> None: """simple docstring""" self._resize(len(self._buckets ) * 2 ) def UpperCAmelCase__ ( self ) -> None: """simple docstring""" self._resize(len(self._buckets ) // 2 ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Iterator[int]: """simple docstring""" _UpperCAmelCase = self._get_bucket_index(_SCREAMING_SNAKE_CASE ) for _ in range(len(self._buckets ) ): yield ind _UpperCAmelCase = self._get_next_ind(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" for ind in self._iterate_buckets(_SCREAMING_SNAKE_CASE ): if self._try_set(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): break def __setitem__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if self._is_full(): self._size_up() self._add_item(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __delitem__( self , _SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" for ind in self._iterate_buckets(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = self._buckets[ind] if item is None: raise KeyError(_SCREAMING_SNAKE_CASE ) if item is _deleted: continue if item.key == key: _UpperCAmelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , 
_SCREAMING_SNAKE_CASE ) -> VAL: """simple docstring""" for ind in self._iterate_buckets(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_SCREAMING_SNAKE_CASE ) def __len__( self ) -> int: """simple docstring""" return self._len def __iter__( self ) -> Iterator[KEY]: """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: """simple docstring""" _UpperCAmelCase = ' ,'.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
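# A minimal usage sketch of the open-addressing map above. Upstream this final
# MutableMapping subclass is called HashMap; the name below assumes that alias:
#
#   hash_map = HashMap()
#   hash_map["alpha"] = 1           # __setitem__; resizes up once the load factor is exceeded
#   hash_map["alpha"] = 2           # same key: _try_set overwrites the stored _Item in place
#   assert hash_map["alpha"] == 2   # __getitem__ walks the linear probe sequence
#   del hash_map["alpha"]           # bucket is replaced by the _deleted sentinel, not None
#   assert len(hash_map) == 0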
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __a ( UpperCAmelCase ): _a : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
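# A hedged usage sketch for the pipeline re-exported above. The checkpoint id is an
# assumption (the ModelScope text-to-video weights commonly paired with this
# pipeline), not something this module prescribes:
#
#   import torch
#   from diffusers import TextToVideoSDPipeline
#
#   pipe = TextToVideoSDPipeline.from_pretrained(
#       "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#   ).to("cuda")
#   frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames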
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.17.0.dev0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') A =logging.getLogger(__name__) @dataclass class _a : __a : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) __a : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) __a : int = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__snake_case , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) __a : bool = field( default=__snake_case , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) __a : Optional[int] = field( default=__snake_case , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __a : Optional[int] = field( default=__snake_case , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) __a : Optional[int] = field( default=__snake_case , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) __a : Optional[str] = field( default=__snake_case , metadata={"""help""": """A csv or a json file containing the training data."""} ) __a : Optional[str] = field( default=__snake_case , metadata={"""help""": """A csv or a json file containing the validation data."""} ) __a : Optional[str] = field(default=__snake_case , metadata={"""help""": """A csv or a json file containing the test data."""} ) def A ( self : List[str] ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' ) else: UpperCAmelCase = self.train_file.split('''.''' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." UpperCAmelCase = self.validation_file.split('''.''' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _a : __a : str = field( default=__snake_case , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __a : Optional[str] = field( default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __a : bool = field( default=__snake_case , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) __a : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __a : bool = field( default=__snake_case , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def snake_case_ (): UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_a ) datasets.utils.logging.set_verbosity(_a ) transformers.utils.logging.set_verbosity(_a ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. 
# # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. UpperCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: UpperCAmelCase = data_args.train_file.split('''.''' )[-1] UpperCAmelCase = data_args.test_file.split('''.''' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." UpperCAmelCase = data_args.test_file else: raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' ) for key in data_files.keys(): logger.info(F"load a local file for {key}: {data_files[key]}" ) if data_args.train_file.endswith('''.csv''' ): # Loading a dataset from local csv files UpperCAmelCase = load_dataset('''csv''' , data_files=_a , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files UpperCAmelCase = load_dataset('''json''' , data_files=_a , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels UpperCAmelCase = raw_datasets["""train"""].features["""label"""].names UpperCAmelCase = len(_a ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer UpperCAmelCase = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_a , ) UpperCAmelCase = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: UpperCAmelCase = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch UpperCAmelCase = False # Some models have set the order of the labels to use, so let's make sure we do use it. 
UpperCAmelCase = {"""Refused""": 0, """Entailed""": 1} UpperCAmelCase = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(_a : Optional[Any] ): # Tokenize the texts def _convert_table_text_to_pandas(_a : Optional[Any] ): UpperCAmelCase = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )] UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd UpperCAmelCase = examples["""statement"""] UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) ) UpperCAmelCase = tokenizer(_a , _a , padding=_a , max_length=_a , truncation=_a ) UpperCAmelCase = examples["""label"""] return result with training_args.main_process_first(desc='''dataset map pre-processing''' ): UpperCAmelCase = raw_datasets.map( _a , batched=_a , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) UpperCAmelCase = raw_datasets["""train"""] if data_args.max_train_samples is not None: UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) UpperCAmelCase = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('''--do_predict requires a test dataset''' ) UpperCAmelCase = raw_datasets["""test"""] if data_args.max_predict_samples is not None: UpperCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_a ) ) , 3 ): logger.info(F"Sample {index} of the training set: {train_dataset[index]}." ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_a : EvalPrediction ): UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _a ) else p.predictions UpperCAmelCase = np.argmax(_a , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: UpperCAmelCase = default_data_collator elif training_args.fpaa: UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) else: UpperCAmelCase = None # Initialize our Trainer UpperCAmelCase = Trainer( model=_a , args=_a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_a , tokenizer=_a , data_collator=_a , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=_a ) UpperCAmelCase = train_result.metrics UpperCAmelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_a ) ) UpperCAmelCase = min(_a , len(_a ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''' , _a ) trainer.save_metrics('''train''' , _a ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate(eval_dataset=_a ) UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_a ) UpperCAmelCase = min(_a , len(_a ) ) trainer.log_metrics('''eval''' , _a ) trainer.save_metrics('''eval''' , _a ) if training_args.do_predict: logger.info('''*** Predict ***''' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. UpperCAmelCase = predict_dataset.remove_columns('''label''' ) UpperCAmelCase = trainer.predict(_a , metric_key_prefix='''predict''' ).predictions UpperCAmelCase = np.argmax(_a , axis=1 ) UpperCAmelCase = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' ) if trainer.is_world_process_zero(): with open(_a , '''w''' ) as writer: logger.info('''***** Predict Results *****''' ) writer.write('''index\tprediction\n''' ) for index, item in enumerate(_a ): UpperCAmelCase = label_list[item] writer.write(F"{index}\t{item}\n" ) UpperCAmelCase = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**_a ) else: trainer.create_model_card(**_a ) def snake_case_ (_a : Dict ): main() if __name__ == "__main__": main()
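# A hedged example invocation of the TAPEX/TabFact fine-tuning script above; the
# script filename, model id and output directory are illustrative placeholders:
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --dataset_config_name tab_fact \
#       --do_train --do_eval --do_predict \
#       --max_seq_length 1024 \
#       --output_dir ./tapex-tabfact --overwrite_output_dir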
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED UpperCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCamelCase_ = { '''allenai/led-base-16384''': 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase_ : Dict = bs[:] UpperCAmelCase_ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(_a ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Any = [chr(_a ) for n in cs] return dict(zip(_a , _a ) ) def lowerCamelCase_ ( _a : List[str] ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = set() UpperCAmelCase_ : List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : Optional[int] = char return pairs class _snake_case ( __snake_case ): '''simple docstring''' A__ : str = VOCAB_FILES_NAMES A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any: UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,) with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = errors # how to handle errors in decoding UpperCAmelCase_ : int = bytes_to_unicode() UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1] UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def A__ ( self: List[str] ) -> List[str]: return len(self.encoder ) def A__ ( self: Any ) -> Union[str, Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]: if token in self.cache: return self.cache[token] UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = 0 while i < len(lowerCamelCase_ ): try: UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ ) UpperCAmelCase_ : List[Any] = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ ) UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = word return word def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]: UpperCAmelCase_ : str = [] for token in re.findall(self.pat ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]: return 
self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) ) def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]: return self.decoder.get(lowerCamelCase_ ) def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]: UpperCAmelCase_ : str = """""".join(lowerCamelCase_ ) UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : List[Any] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : List[str] = os.path.join( lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCAmelCase_ : str = 0 with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) UpperCAmelCase_ : Tuple = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] UpperCAmelCase_ : Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Optional[Any] = [self.sep_token_id] UpperCAmelCase_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Dict = """ """ + text return (text, kwargs) def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: 
Optional[bool] = None ,) -> dict: UpperCAmelCase_ : Optional[int] = super()._pad( encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : str = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
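# A brief usage sketch of the tokenizer above, exercising the _pad override that pads
# global_attention_mask (with -1) alongside the regular inputs; the checkpoint id
# comes from the pretrained map at the top of this file:
#
#   from transformers import LEDTokenizer
#
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok(["short", "a somewhat longer input sentence"])
#   # give the first token of each sequence global attention
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#   batch = tok.pad(enc, padding="longest")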
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def a_ ( lowerCamelCase : Dict="" ): lowerCAmelCase = tempfile.mkdtemp() return os.path.join(lowerCamelCase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : str ) -> Dict: lowerCAmelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 lowerCAmelCase = AgentAudio(UpperCAmelCase__ ) lowerCAmelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(UpperCAmelCase__ , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(UpperCAmelCase__ ) ) # Ensure that the file contains the same value as the original tensor lowerCAmelCase , lowerCAmelCase = sf.read(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , atol=1E-4 ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: lowerCAmelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 lowerCAmelCase = get_new_path(suffix='.wav' ) sf.write(UpperCAmelCase__ , UpperCAmelCase__ , 1_6_0_0_0 ) lowerCAmelCase = AgentAudio(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , UpperCAmelCase__ ) @require_vision @require_torch class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : str ) -> List[Any]: lowerCAmelCase = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) ) lowerCAmelCase = AgentImage(UpperCAmelCase__ ) lowerCAmelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(UpperCAmelCase__ , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Optional[int] ) -> int: lowerCAmelCase = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' lowerCAmelCase = Image.open(UpperCAmelCase__ ) lowerCAmelCase = AgentImage(UpperCAmelCase__ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: lowerCAmelCase = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' lowerCAmelCase = Image.open(UpperCAmelCase__ ) lowerCAmelCase = AgentImage(UpperCAmelCase__ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(UpperCAmelCase__ ) ) class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: lowerCAmelCase = 'Hey!' 
lowerCAmelCase = AgentText(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , agent_type.to_string() ) self.assertEqual(UpperCAmelCase__ , agent_type.to_raw() ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
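# The contract these tests pin down, in short: each Agent* type accepts either raw
# data or a path, and exposes both a serialized view (to_string, a file path for
# audio/images) and a raw view (to_raw). For text the two coincide:
#
#   from transformers.tools.agent_types import AgentText
#
#   t = AgentText("Hey!")
#   assert t.to_string() == t.to_raw() == "Hey!"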
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __snake_case ={ """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case =[ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
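# With the _LazyModule above, the torch-backed classes are only imported when first
# accessed; a typical hedged usage of the exported names:
#
#   from transformers import SwiftFormerConfig, SwiftFormerForImageClassification
#
#   config = SwiftFormerConfig()
#   model = SwiftFormerForImageClassification(config)  # triggers the real import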
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : int , __lowercase : Optional[Any] , __lowercase : int=13 , __lowercase : int=32 , __lowercase : str=3 , __lowercase : List[str]=4 , __lowercase : Dict=[10, 20, 30, 40] , __lowercase : str=[2, 2, 3, 2] , __lowercase : str=True , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=37 , __lowercase : Dict="gelu" , __lowercase : Dict=10 , __lowercase : Any=0.02 , __lowercase : Tuple=["stage2", "stage3", "stage4"] , __lowercase : int=[2, 3, 4] , __lowercase : List[str]=None , ): '''simple docstring''' __a = parent __a = batch_size __a = image_size __a = num_channels __a = num_stages __a = hidden_sizes __a = depths __a = is_training __a = use_labels __a = intermediate_size __a = hidden_act __a = num_labels __a = initializer_range __a = out_features __a = out_indices __a = scope def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Tuple ): '''simple docstring''' __a = ConvNextVaModel(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase_ ( self : Any , __lowercase : Dict , __lowercase : Dict , __lowercase : List[Any] ): '''simple docstring''' __a = ConvNextVaForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : int , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : Tuple ): '''simple docstring''' __a = ConvNextVaBackbone(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) # verify hidden states 
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __a = None __a = ConvNextVaBackbone(config=__lowercase ) model.to(__lowercase ) model.eval() __a = model(__lowercase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values} return config, inputs_dict def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase : List[Any] =( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __lowerCamelCase : List[str] =( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) __lowerCamelCase : int =False __lowerCamelCase : Optional[Any] =False __lowerCamelCase : List[Any] =False __lowerCamelCase : Union[str, Any] =False __lowerCamelCase : Optional[Any] =False def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' __a = ConvNextVaModelTester(self ) __a = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self : str ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: __a , __a = self.model_tester.prepare_config_and_inputs_with_labels() __a = True if model_class.__name__ in [ *get_values(__lowercase ), *get_values(__lowercase ), ]: continue __a = model_class(__lowercase ) model.to(__lowercase ) model.train() __a = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) __a = 
model(**__lowercase ).loss loss.backward() def UpperCamelCase_ ( self : Dict ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: __a , __a = self.model_tester.prepare_config_and_inputs_with_labels() __a = False __a = True if ( model_class.__name__ in [*get_values(__lowercase ), *get_values(__lowercase )] or not model_class.supports_gradient_checkpointing ): continue __a = model_class(__lowercase ) model.to(__lowercase ) model.gradient_checkpointing_enable() model.train() __a = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) __a = model(**__lowercase ).loss loss.backward() def UpperCamelCase_ ( self : int ): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__lowercase ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowercase ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : Optional[Any] ): __a = model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__lowercase , __lowercase ) ) __a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __a = self.model_tester.num_stages self.assertEqual(len(__lowercase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) @slow def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = ConvNextVaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase__ ( ): """simple docstring""" __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : str ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self : Dict ): '''simple docstring''' __a = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(__lowercase ) __a = self.default_image_processor __a = prepare_img() __a = preprocessor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase ) # 
forward pass with torch.no_grad(): __a = model(**__lowercase ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowercase ) __a = torch.tensor([0.9996, 0.1966, -0.4386] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''Reverse every word longer than four characters in a sentence.'''


def reverse_long_words(sentence: str) -> str:
    """Return ``sentence`` with every word of more than four characters reversed."""
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
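# For the demo call above: "wollef" -> "fellow" and "sroirraw" -> "warriors", while
# "Hey" (3 characters) is left alone, so the printed output is "Hey fellow warriors".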
'''simple docstring''' import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def _lowercase ( *__A ): '''simple docstring''' if not isinstance(__A ,__A ): __UpperCamelCase = list(__A ) for i in range(len(__A ) ): __UpperCamelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(__A ,__A ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def _lowercase ( __A = None ,__A = 128 ): '''simple docstring''' if function is None: return functools.partial(__A ,starting_batch_size=__A ) __UpperCamelCase = starting_batch_size def decorator(*__A ,**__A ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() __UpperCamelCase = list(inspect.signature(__A ).parameters.keys() ) # Guard against user error if len(__A ) < (len(__A ) + 1): __UpperCamelCase = """, """.join([f"{arg}={value}" for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f"Batch size was passed into `{function.__name__}` as the first argument when called." f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(__A ,*__A ,**__A ) except Exception as e: if should_reduce_batch_size(__A ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
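# A minimal usage sketch of the batch-size-halving decorator above (exported upstream
# as accelerate.utils.find_executable_batch_size); the training function is a
# placeholder:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size, model, dataloader):
#       ...  # retried at 128, 64, 32, ... after each recognized out-of-memory error
#
#   training_loop(model, dataloader)  # batch_size itself is injected by the decorator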
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class lowerCAmelCase__:
    lowerCAmelCase_ = 42  # [batch_size x 3]
    lowerCAmelCase_ = 42  # [batch_size x 3]
    lowerCAmelCase_ = 42  # [batch_size x 3]
    lowerCAmelCase_ = 42  # [batch_size x 3]
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42

    def _snake_case(self):
        """simple docstring"""
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def _snake_case(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.floataa))

    def _snake_case(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.floataa))

    def _snake_case(self):
        """simple docstring"""
        lowercase_: Tuple = torch.arange(self.height * self.width)
        lowercase_: Union[str, Any] = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(__SCREAMING_SNAKE_CASE, self.width, rounding_mode='''trunc'''),
            ],
            axis=1,
        )
        return coords

    @property
    def _snake_case(self):
        """simple docstring"""
        lowercase_, *lowercase_: str = self.shape
        lowercase_: List[Any] = int(np.prod(__SCREAMING_SNAKE_CASE))
        lowercase_: Union[str, Any] = self.get_image_coords()
        lowercase_: Tuple = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        lowercase_: Dict = self.get_camera_rays(__SCREAMING_SNAKE_CASE)
        lowercase_: Tuple = rays.view(__SCREAMING_SNAKE_CASE, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def _snake_case(self, __SCREAMING_SNAKE_CASE):
        """simple docstring"""
        lowercase_, *lowercase_, lowercase_: str = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        lowercase_: Any = coords.view(__SCREAMING_SNAKE_CASE, -1, 2)
        lowercase_: str = self.resolution()
        lowercase_: Dict = self.fov()
        lowercase_: Optional[Any] = (flat.float() / (res - 1)) * 2 - 1
        lowercase_: Any = fracs * torch.tan(fov / 2)
        lowercase_: Optional[int] = fracs.view(__SCREAMING_SNAKE_CASE, -1, 2)
        lowercase_: Optional[Any] = (
            self.z.view(__SCREAMING_SNAKE_CASE, 1, 3)
            + self.x.view(__SCREAMING_SNAKE_CASE, 1, 3) * fracs[:, :, :1]
            + self.y.view(__SCREAMING_SNAKE_CASE, 1, 3) * fracs[:, :, 1:]
        )
        lowercase_: List[str] = directions / directions.norm(dim=-1, keepdim=__SCREAMING_SNAKE_CASE)
        lowercase_: str = torch.stack(
            [
                torch.broadcast_to(self.origin.view(__SCREAMING_SNAKE_CASE, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(__SCREAMING_SNAKE_CASE, *__SCREAMING_SNAKE_CASE, 2, 3)

    def _snake_case(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
        """simple docstring"""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=__SCREAMING_SNAKE_CASE,
            height=__SCREAMING_SNAKE_CASE,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def snake_case_(__SCREAMING_SNAKE_CASE: int):
    """simple docstring"""
    lowercase_: Any = []
    lowercase_: List[str] = []
    lowercase_: int = []
    lowercase_: str = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        lowercase_: Dict = np.array([np.sin(__SCREAMING_SNAKE_CASE), np.cos(__SCREAMING_SNAKE_CASE), -0.5])
        z /= np.sqrt(np.sum(z**2))
        lowercase_: Tuple = -z * 4
        lowercase_: str = np.array([np.cos(__SCREAMING_SNAKE_CASE), -np.sin(__SCREAMING_SNAKE_CASE), 0.0])
        lowercase_: Optional[Any] = np.cross(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        origins.append(__SCREAMING_SNAKE_CASE)
        xs.append(__SCREAMING_SNAKE_CASE)
        ys.append(__SCREAMING_SNAKE_CASE)
        zs.append(__SCREAMING_SNAKE_CASE)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE, axis=0)).float(),
        x=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE, axis=0)).float(),
        y=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE, axis=0)).float(),
        z=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE, axis=0)).float(),
        width=__SCREAMING_SNAKE_CASE,
        height=__SCREAMING_SNAKE_CASE,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(__SCREAMING_SNAKE_CASE)),
    )
93
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowercase: Optional[Any] = logging.get_logger(__name__)

_lowercase: List[str] = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class lowerCAmelCase__(lowerCamelCase_):
    lowerCAmelCase_ = '''pix2struct_text_model'''
    lowerCAmelCase_ = ['''past_key_values''']
    lowerCAmelCase_ = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE=5_02_44,
        __SCREAMING_SNAKE_CASE=7_68,
        __SCREAMING_SNAKE_CASE=64,
        __SCREAMING_SNAKE_CASE=20_48,
        __SCREAMING_SNAKE_CASE=12,
        __SCREAMING_SNAKE_CASE=12,
        __SCREAMING_SNAKE_CASE=32,
        __SCREAMING_SNAKE_CASE=1_28,
        __SCREAMING_SNAKE_CASE=0.1,
        __SCREAMING_SNAKE_CASE=1E-6,
        __SCREAMING_SNAKE_CASE=1.0,
        __SCREAMING_SNAKE_CASE="gelu_new",
        __SCREAMING_SNAKE_CASE=0,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=0,
        __SCREAMING_SNAKE_CASE=1,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=True,
        **__SCREAMING_SNAKE_CASE,
    ):
        """simple docstring"""
        lowercase_: Any = vocab_size
        lowercase_: Tuple = hidden_size
        lowercase_: Optional[Any] = d_kv
        lowercase_: List[str] = d_ff
        lowercase_: List[str] = num_layers
        lowercase_: Optional[Any] = num_heads
        lowercase_: Union[str, Any] = relative_attention_num_buckets
        lowercase_: Optional[int] = relative_attention_max_distance
        lowercase_: Union[str, Any] = dropout_rate
        lowercase_: Dict = layer_norm_epsilon
        lowercase_: Dict = initializer_factor
        lowercase_: List[Any] = use_cache
        lowercase_: Optional[int] = eos_token_id
        lowercase_: Optional[int] = decoder_start_token_id

        # for backwards compatibility
        lowercase_: Any = dense_act_fn

        super().__init__(
            pad_token_id=__SCREAMING_SNAKE_CASE,
            eos_token_id=__SCREAMING_SNAKE_CASE,
            decoder_start_token_id=__SCREAMING_SNAKE_CASE,
            tie_word_embeddings=__SCREAMING_SNAKE_CASE,
            is_decoder=__SCREAMING_SNAKE_CASE,
            **__SCREAMING_SNAKE_CASE,
        )

    @classmethod
    def _snake_case(cls, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE)
        lowercase_, lowercase_: Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''') == "pix2struct":
            lowercase_: List[Any] = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)


class lowerCAmelCase__(lowerCamelCase_):
    lowerCAmelCase_ = '''pix2struct_vision_model'''

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE=7_68,
        __SCREAMING_SNAKE_CASE=7_68,
        __SCREAMING_SNAKE_CASE=20_48,
        __SCREAMING_SNAKE_CASE=64,
        __SCREAMING_SNAKE_CASE=12,
        __SCREAMING_SNAKE_CASE=12,
        __SCREAMING_SNAKE_CASE="gelu_new",
        __SCREAMING_SNAKE_CASE=1E-6,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=0.0,
        __SCREAMING_SNAKE_CASE=1E-1_0,
        __SCREAMING_SNAKE_CASE=1.0,
        __SCREAMING_SNAKE_CASE=40_96,
        __SCREAMING_SNAKE_CASE=32,
        __SCREAMING_SNAKE_CASE=1_28,
        **__SCREAMING_SNAKE_CASE,
    ):
        """simple docstring"""
        super().__init__(**__SCREAMING_SNAKE_CASE)
        lowercase_: Union[str, Any] = hidden_size
        lowercase_: Any = patch_embed_hidden_size
        lowercase_: List[Any] = d_ff
        lowercase_: Dict = dropout_rate
        lowercase_: Any = num_hidden_layers
        lowercase_: Any = num_attention_heads
        lowercase_: int = initializer_range
        lowercase_: Dict = initializer_factor
        lowercase_: Dict = attention_dropout
        lowercase_: Optional[Any] = layer_norm_eps
        lowercase_: str = dense_act_fn
        lowercase_: Dict = seq_len
        lowercase_: List[Any] = relative_attention_num_buckets
        lowercase_: int = relative_attention_max_distance
        lowercase_: Optional[int] = d_kv

    @classmethod
    def _snake_case(cls, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE)
        lowercase_, lowercase_: str = cls.get_config_dict(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('''model_type''') == "pix2struct":
            lowercase_: Optional[int] = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)


class lowerCAmelCase__(lowerCamelCase_):
    lowerCAmelCase_ = '''pix2struct'''
    lowerCAmelCase_ = True

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=None,
        __SCREAMING_SNAKE_CASE=1.0,
        __SCREAMING_SNAKE_CASE=0.02,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=True,
        **__SCREAMING_SNAKE_CASE,
    ):
        """simple docstring"""
        super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE, is_encoder_decoder=__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE)

        if text_config is None:
            lowercase_: Optional[Any] = {}
            logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
        if vision_config is None:
            lowercase_: Dict = {}
            logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')

        lowercase_: str = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE)
        lowercase_: Tuple = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE)
        lowercase_: Optional[Any] = self.text_config.decoder_start_token_id
        lowercase_: Union[str, Any] = self.text_config.pad_token_id
        lowercase_: Union[str, Any] = self.text_config.eos_token_id
        lowercase_: int = initializer_factor
        lowercase_: Any = initializer_range
        lowercase_: str = self.initializer_range
        lowercase_: str = self.initializer_range
        lowercase_: int = is_vqa

    @classmethod
    def _snake_case(cls, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE):
        """simple docstring"""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """simple docstring"""
        lowercase_: Tuple = copy.deepcopy(self.__dict__)
        lowercase_: Any = self.text_config.to_dict()
        lowercase_: Optional[Any] = self.vision_config.to_dict()
        lowercase_: Optional[int] = self.__class__.model_type
        return output
93
1
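The row above obfuscates transformers' Pix2Struct configuration classes (note the mangled PixaStruct* references to them). A hedged sketch using the published, de-obfuscated API; the parameter values are illustrative only:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

# Compose a full config from sub-configs, mirroring from_text_vision_configs above.
text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=4)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert config.text_config.num_layers == 2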
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
318
1
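The first sample in the row above is an obfuscated check_anagrams; its logic reduces to a single character-count pass. A de-obfuscated sketch for reference — the readable names are mine, not the row's:

from collections import defaultdict

def check_anagrams(first_str: str, second_str: str) -> bool:
    # Case-insensitive, whitespace-insensitive anagram test in O(n).
    first_str = first_str.lower().replace(" ", "")
    second_str = second_str.lower().replace(" ", "")
    if len(first_str) != len(second_str):
        return False
    count: defaultdict[str, int] = defaultdict(int)
    for a, b in zip(first_str, second_str):
        count[a] += 1
        count[b] -= 1
    return all(v == 0 for v in count.values())

assert check_anagrams("Silent", "Listen")
assert not check_anagrams("there", "their")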
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) __snake_case : int = logging.getLogger(__name__) if __name__ == "__main__": __snake_case : int = argparse.ArgumentParser( description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)' ) parser.add_argument( '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.' ) parser.add_argument( '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.' ) parser.add_argument('--vocab_size', default=30_522, type=int) __snake_case : Optional[Any] = parser.parse_args() logger.info(F"""Loading data from {args.data_file}""") with open(args.data_file, 'rb') as fp: __snake_case : Optional[Any] = pickle.load(fp) logger.info('Counting occurrences for MLM.') __snake_case : Optional[Any] = Counter() for tk_ids in data: counter.update(tk_ids) __snake_case : Optional[Any] = [0] * args.vocab_size for k, v in counter.items(): __snake_case : Union[str, Any] = v logger.info(F"""Dump to {args.token_counts_dump}""") with open(args.token_counts_dump, 'wb') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
269
"""simple docstring""" from __future__ import annotations import time import numpy as np __snake_case : Optional[Any] = [8, 5, 9, 7] __snake_case : List[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __snake_case : Optional[int] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class A__ : '''simple docstring''' def __init__( self: Any , _SCREAMING_SNAKE_CASE: list[int] , _SCREAMING_SNAKE_CASE: list[list[int]] , _SCREAMING_SNAKE_CASE: list[list[int]] , ) -> None: """simple docstring""" __lowerCAmelCase : Any = claim_vector __lowerCAmelCase : Tuple = allocated_resources_table __lowerCAmelCase : Tuple = maximum_claim_table def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> list[int]: """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table) for i in range(len(self.__allocated_resources_table[0])) ] def _SCREAMING_SNAKE_CASE ( self: int) -> list[int]: """simple docstring""" return np.array(self.__claim_vector) - np.array( self.__processes_resource_summation()) def _SCREAMING_SNAKE_CASE ( self: int) -> list[list[int]]: """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i]) - np.array(_SCREAMING_SNAKE_CASE)) for i, allocated_resource in enumerate(self.__allocated_resources_table) ] def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> dict[int, list[int]]: """simple docstring""" return {self.__need().index(_SCREAMING_SNAKE_CASE): i for i in self.__need()} def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> None: """simple docstring""" __lowerCAmelCase : Optional[int] = self.__need() __lowerCAmelCase : int = self.__allocated_resources_table __lowerCAmelCase : Dict = self.__available_resources() __lowerCAmelCase : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n") while need_list: __lowerCAmelCase : int = False for each_need in need_list: __lowerCAmelCase : Dict = True for index, need in enumerate(_SCREAMING_SNAKE_CASE): if need > available_resources[index]: __lowerCAmelCase : Dict = False break if execution: __lowerCAmelCase : Any = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowerCAmelCase : Union[str, Any] = original_need_index print(F"""Process {process_number + 1} is executing.""") # remove the process run from stack need_list.remove(_SCREAMING_SNAKE_CASE) # update available/freed resources stack __lowerCAmelCase : Dict = np.array(_SCREAMING_SNAKE_CASE) + np.array( alloc_resources_table[process_number]) print( "Updated available resource stack for processes: " + " ".join([str(_SCREAMING_SNAKE_CASE) for x in available_resources])) break if safe: print("The process is in a safe state.\n") else: print("System in unsafe state. 
Aborting...\n") break def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]: """simple docstring""" print(" " * 9 + "Allocated Resource Table") for item in self.__allocated_resources_table: print( F"""P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE) + 1}""" + " ".join(F"""{it:>8}""" for it in item) + "\n") print(" " * 9 + "System Resource Table") for item in self.__maximum_claim_table: print( F"""P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE) + 1}""" + " ".join(F"""{it:>8}""" for it in item) + "\n") print( "Current Usage by Active Processes: " + " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__claim_vector)) print( "Initial Available Resources: " + " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__available_resources())) time.sleep(1) if __name__ == "__main__": import doctest doctest.testmod()
269
1
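The second sample in the row above is an obfuscated Banker's safety algorithm. A hedged usage sketch with the de-obfuscated names from the TheAlgorithms source it appears to mirror — BankersAlgorithm and main(describe=True) are assumptions based on that source, not names defined in the row:

# Assumed de-obfuscated interface; the test vectors are the row's own module-level data.
bankers = BankersAlgorithm(
    claim_vector=[8, 5, 9, 7],
    allocated_resources_table=test_allocated_res_table,  # the 5x4 allocation matrix above
    maximum_claim_table=test_maximum_claim_table,        # the 5x4 maximum-claim matrix above
)
bankers.main(describe=True)  # prints the tables, then executes processes in a safe order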
from __future__ import annotations

from statistics import mean


def UpperCamelCase_(_snake_case: list[int], _snake_case: list[int], _snake_case: int):
    """simple docstring"""
    __a = [0] * no_of_processes
    __a = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(_snake_case):
        __a = burst_time[i]
    __a = []
    __a = 0
    __a = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        __a = []
        __a = -1
        for i in range(_snake_case):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(_snake_case)
        if len(_snake_case) > 0:
            __a = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    __a = i
            total_time += burst_time[target_process]
            completed += 1
            __a = 0
            __a = total_time - arrival_time[target_process] - burst_time[target_process]
        else:
            total_time += 1
    return waiting_time


def UpperCamelCase_(_snake_case: list[int], _snake_case: int, _snake_case: list[int]):
    """simple docstring"""
    __a = [0] * no_of_processes
    for i in range(_snake_case):
        __a = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    _lowerCAmelCase: Union[str, Any] = 4
    _lowerCAmelCase: Tuple = [2, 5, 3, 7]
    _lowerCAmelCase: Union[str, Any] = [0, 0, 0, 0]
    _lowerCAmelCase: Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    _lowerCAmelCase: Optional[Any] = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
308
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


_lowerCAmelCase: Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase: Any = "pytorch_model.bin"


@dataclasses.dataclass
class __magic_name__:
    SCREAMING_SNAKE_CASE = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'}
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'},
    )


@dataclasses.dataclass
class __magic_name__:
    SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
    SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_, metadata={'help': 'A csv or a json file containing the validation data.'}
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_, metadata={'help': 'The name of the task to train on.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_, metadata={'help': 'The list of labels for the task.'}
    )


@dataclasses.dataclass
class __magic_name__:
    SCREAMING_SNAKE_CASE = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'}
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default='accuracy', metadata={'help': 'The evaluation metric used for the task.'}
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default='no',
        metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=1_0,
        metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=0.0,
        metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        },
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_,
        metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_,
        metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_,
        metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=0.0,
        metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=1_0_0,
        metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'},
    )
    SCREAMING_SNAKE_CASE = dataclasses.field(
        default=lowerCAmelCase_,
        metadata={'help': 'Random seed for initialization.'},
    )


def UpperCamelCase_(_snake_case: int, _snake_case: str, _snake_case: Optional[int], _snake_case: str, _snake_case: Union[str, Any], _snake_case: List[Any]):
    """simple docstring"""
    __a = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        __a = dataset.filter(lambda _snake_case: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        __a = int(eval_result * len(_snake_case))
        print(_snake_case)
        __a = dataset.sort('probability', reverse=_snake_case)
        __a = dataset.select(range(_snake_case))
    __a = dataset.remove_columns(['label', 'probability'])
    __a = dataset.rename_column('prediction', 'label')
    __a = dataset.map(lambda _snake_case: {"label": idalabel[example["label"]]})
    __a = dataset.shuffle(seed=args.seed)
    __a = os.path.join(_snake_case, F'train_pseudo.{args.data_file_extension}')
    if args.data_file_extension == "csv":
        dataset.to_csv(_snake_case, index=_snake_case)
    else:
        dataset.to_json(_snake_case)


def UpperCamelCase_(_snake_case: List[Any], _snake_case: str, _snake_case: int, _snake_case: Optional[int], **_snake_case: List[str]):
    """simple docstring"""
    __a = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    __a = STModelArguments(model_name_or_path=_snake_case)
    __a = STDataArguments(train_file=_snake_case, infer_file=_snake_case)
    __a = STTrainingArguments(output_dir=_snake_case)
    __a = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(_snake_case).items():
            setattr(_snake_case, _snake_case, _snake_case)
    for key, value in kwargs.items():
        if hasattr(_snake_case, _snake_case):
            setattr(_snake_case, _snake_case, _snake_case)

    # Sanity checks
    __a = {}
    __a = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    __a = args.train_file
    __a = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        __a = args.eval_file

    for key in data_files:
        __a = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
        if args.data_file_extension is None:
            __a = extension
        else:
            assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'

    assert (
        args.eval_metric in datasets.list_metrics()
    ), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('Creating the initial data directory for self-training...')
    __a = F'{args.output_dir}/self-train_iter-{{}}'.format
    __a = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=_snake_case)
        os.makedirs(_snake_case, exist_ok=_snake_case)
    accelerator.wait_for_everyone()

    __a = None
    __a = None
    __a = 0
    __a = False

    # Show the progress bar
    __a = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        __a = data_dir_format(_snake_case)
        assert os.path.exists(_snake_case)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        __a = os.path.join(_snake_case, 'stage-1')
        __a = {
            'accelerator': accelerator,
            'model_name_or_path': args.model_name_or_path,
            'cache_dir': args.cache_dir,
            'do_train': True,
            'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
            'do_eval': True if args.eval_file is not None else False,
            'eval_file': data_files['eval'],
            'do_predict': True,
            'infer_file': data_files['infer'],
            'task_name': args.task_name,
            'label_list': args.label_list,
            'output_dir': current_output_dir,
            'eval_metric': args.eval_metric,
            'evaluation_strategy': args.evaluation_strategy,
            'early_stopping_patience': args.early_stopping_patience,
            'early_stopping_threshold': args.early_stopping_threshold,
            'seed': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(_snake_case, _snake_case):
                arguments_dict.update({key: value})

        __a = os.path.join(_snake_case, 'best-checkpoint', _snake_case)
        if os.path.exists(_snake_case):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.',
                _snake_case,
                _snake_case,
            )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', _snake_case)
            finetune(**_snake_case)
            accelerator.wait_for_everyone()
            assert os.path.exists(_snake_case)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', _snake_case)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            __a = os.path.join(_snake_case, 'best-checkpoint')
            __a = os.path.join(_snake_case, 'stage-2')
            # Update arguments_dict
            __a = model_path
            __a = data_files['train']
            __a = current_output_dir

            __a = os.path.join(_snake_case, 'best-checkpoint', _snake_case)
            if os.path.exists(_snake_case):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.',
                    _snake_case,
                    _snake_case,
                )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', _snake_case)
                finetune(**_snake_case)
                accelerator.wait_for_everyone()
                assert os.path.exists(_snake_case)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', _snake_case)

        __a = iteration
        __a = data_dir_format(iteration + 1)

        __a = AutoConfig.from_pretrained(os.path.join(_snake_case, 'best-checkpoint'))
        __a = config.idalabel
        __a = os.path.join(_snake_case, 'eval_results_best-checkpoint.json')
        __a = os.path.join(_snake_case, 'test_results_best-checkpoint.json')
        assert os.path.exists(_snake_case)

        with open(_snake_case, 'r') as f:
            __a = float(json.load(_snake_case)[args.eval_metric])
        __a = os.path.join(_snake_case, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(_snake_case)

        # Loading the dataset from local csv or json files.
        __a = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})['data']
        __a = load_dataset('csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(_snake_case, exist_ok=_snake_case)
            shutil.copy(_snake_case, os.path.join(_snake_case, F'eval_results_iter-{iteration}.json'))
            if os.path.exists(_snake_case):
                shutil.copy(_snake_case, os.path.join(_snake_case, F'test_results_iter-{iteration}.json'))
            create_pseudo_labeled_data(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case)
        accelerator.wait_for_everyone()

        __a = os.path.join(_snake_case, F'train_pseudo.{args.data_file_extension}')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            __a = eval_result
            if best_iteration is None:
                __a = new_iteration
                __a = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    __a = new_iteration
                    __a = new_eval_result
                    __a = 0
                else:
                    if new_eval_result == best_eval_result:
                        __a = new_iteration
                        __a = new_eval_result
                    early_stopping_patience_counter += 1
            if early_stopping_patience_counter >= args.early_stopping_patience:
                __a = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', _snake_case)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, _snake_case)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(_snake_case, F'eval_results_iter-{iteration}.json'),
                os.path.join(_snake_case, 'eval_results_best-iteration.json'),
            )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, _snake_case)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(_snake_case, F'eval_results_iter-{args.max_selftrain_iterations - 1}.json'),
                os.path.join(_snake_case, 'eval_results_best-iteration.json'),
            )
308
1
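A worked check of the shortest-job-first sample at the top of this row (the obfuscated calculate_waitingtime / calculate_turnaroundtime pair): with all four processes arriving at t=0 and bursts [2, 5, 3, 7], the non-preemptive run order is P1, P3, P2, P4, so waiting times are [0, 5, 2, 10] (mean 4.25) and turnaround times [2, 10, 5, 17] (mean 8.5). Assuming the de-obfuscated function names from that sample's own __main__ block:

waiting = calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4)
assert waiting == [0, 5, 2, 10]
assert calculate_turnaroundtime([2, 5, 3, 7], 4, waiting) == [2, 10, 5, 17]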
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    lowerCAmelCase_: Optional[Any] = None

lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)

lowerCAmelCase_: Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

lowerCAmelCase_: Tuple = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

lowerCAmelCase_: Union[str, Any] = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

lowerCAmelCase_: int = '▁'

# Segments (not really needed)
lowerCAmelCase_: Optional[Any] = 0
lowerCAmelCase_: int = 1
lowerCAmelCase_: Optional[Any] = 2
lowerCAmelCase_: List[str] = 3
lowerCAmelCase_: List[str] = 4


class __SCREAMING_SNAKE_CASE(lowerCamelCase_):
    """simple docstring"""

    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = 'left'
    __a = XLNetTokenizer

    def __init__(
        self: Optional[int],
        __a: Optional[int] = None,
        __a: str = None,
        __a: Any = False,
        __a: List[Any] = True,
        __a: List[Any] = False,
        __a: List[str] = "<s>",
        __a: Optional[Any] = "</s>",
        __a: str = "<unk>",
        __a: Union[str, Any] = "<sep>",
        __a: List[str] = "<pad>",
        __a: int = "<cls>",
        __a: Tuple = "<mask>",
        __a: Any = ["<eop>", "<eod>"],
        **__a: int,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        _a = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
        super().__init__(
            vocab_file=__a,
            tokenizer_file=__a,
            do_lower_case=__a,
            remove_space=__a,
            keep_accents=__a,
            bos_token=__a,
            eos_token=__a,
            unk_token=__a,
            sep_token=__a,
            pad_token=__a,
            cls_token=__a,
            mask_token=__a,
            additional_special_tokens=__a,
            **__a,
        )
        _a = 3
        _a = do_lower_case
        _a = remove_space
        _a = keep_accents
        _a = vocab_file
        _a = False if not self.vocab_file else True

    def UpperCamelCase__(self: str, __a: List[int], __a: Optional[List[int]] = None):
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def UpperCamelCase__(self: List[Any], __a: List[int], __a: Optional[List[int]] = None):
        _a = [self.sep_token_id]
        _a = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id

    def UpperCamelCase__(self: Optional[int], __a: str, __a: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(__a):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _a = os.path.join(
            __a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
            copyfile(self.vocab_file, __a)
        return (out_vocab_file,)
63
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


lowerCAmelCase_: int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_: Optional[int] = ['GPTNeoXTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_: List[str] = [
        'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXForCausalLM',
        'GPTNeoXForQuestionAnswering',
        'GPTNeoXForSequenceClassification',
        'GPTNeoXForTokenClassification',
        'GPTNeoXLayer',
        'GPTNeoXModel',
        'GPTNeoXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    lowerCAmelCase_: List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
63
1
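The tokenizer sample in this row obfuscates XLNetTokenizerFast, which, unlike BERT, appends its separator and classifier tokens at the end of the sequence (see build_inputs_with_special_tokens above). A quick check against the public API; downloading the xlnet-base-cased vocabulary is implied:

from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# Single sequence: token ids, then <sep>, then <cls> at the very end.
assert tok.build_inputs_with_special_tokens([10, 11]) == [10, 11, tok.sep_token_id, tok.cls_token_id]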
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


UpperCamelCase__: Dict = logging.get_logger(__name__)


class lowerCamelCase_(a_):
    SCREAMING_SNAKE_CASE_ = ['input_features', 'attention_mask']

    def __init__(
        self: int,
        __lowerCamelCase: str = 80,
        __lowerCamelCase: str = 1_60_00,
        __lowerCamelCase: Any = 80,
        __lowerCamelCase: List[Any] = 0.0,
        __lowerCamelCase: Any = True,
        __lowerCamelCase: Any = True,
        __lowerCamelCase: int = True,
        **__lowerCamelCase: Optional[int],
    ):
        '''simple docstring'''
        super().__init__(feature_size=__lowerCamelCase, sampling_rate=__lowerCamelCase, padding_value=__lowerCamelCase, **__lowerCamelCase)
        a = num_mel_bins
        a = do_ceptral_normalize
        a = normalize_means
        a = normalize_vars
        a = True

    def SCREAMING_SNAKE_CASE_(self: Optional[int], __lowerCamelCase: np.ndarray):
        '''simple docstring'''
        a = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        a = torch.from_numpy(__lowerCamelCase).unsqueeze(0)
        a = ta_kaldi.fbank(__lowerCamelCase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def SCREAMING_SNAKE_CASE_(
        __lowerCamelCase: np.ndarray,
        __lowerCamelCase: int,
        __lowerCamelCase: Optional[bool] = True,
        __lowerCamelCase: Optional[bool] = True,
        __lowerCamelCase: float = 0.0,
    ):
        '''simple docstring'''
        if normalize_means:
            a = x[:input_length].mean(axis=0)
            a = np.subtract(__lowerCamelCase, __lowerCamelCase)
        if normalize_vars:
            a = x[:input_length].std(axis=0)
            a = np.divide(__lowerCamelCase, __lowerCamelCase)
        if input_length < x.shape[0]:
            a = padding_value
        # make sure array is in float32
        a = x.astype(np.floataa)
        return x

    def SCREAMING_SNAKE_CASE_(self: str, __lowerCamelCase: List[np.ndarray], __lowerCamelCase: Optional[np.ndarray] = None):
        '''simple docstring'''
        a = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__lowerCamelCase, __lowerCamelCase, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(__lowerCamelCase, __lowerCamelCase)
        ]

    def __call__(
        self: Optional[Any],
        __lowerCamelCase: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        __lowerCamelCase: Union[bool, str, PaddingStrategy] = False,
        __lowerCamelCase: Optional[int] = None,
        __lowerCamelCase: bool = False,
        __lowerCamelCase: Optional[int] = None,
        __lowerCamelCase: Optional[Union[str, TensorType]] = None,
        __lowerCamelCase: Optional[int] = None,
        __lowerCamelCase: Optional[bool] = None,
        **__lowerCamelCase: Tuple,
    ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )

        a = isinstance(__lowerCamelCase, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
        a = is_batched_numpy or (
            isinstance(__lowerCamelCase, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            a = [np.asarray(__lowerCamelCase, dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCamelCase, np.ndarray):
            a = np.asarray(__lowerCamelCase, dtype=np.floataa)
        elif isinstance(__lowerCamelCase, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            a = raw_speech.astype(np.floataa)

        # always return batch
        if not is_batched:
            a = [raw_speech]

        # extract fbank features
        a = [self._extract_fbank_features(__lowerCamelCase) for waveform in raw_speech]

        # convert into correct format for padding
        a = BatchFeature({'''input_features''': features})
        a = self.pad(
            __lowerCamelCase,
            padding=__lowerCamelCase,
            max_length=__lowerCamelCase,
            truncation=__lowerCamelCase,
            pad_to_multiple_of=__lowerCamelCase,
            return_attention_mask=__lowerCamelCase,
            **__lowerCamelCase,
        )

        # make sure list is in array format
        a = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], __lowerCamelCase):
            a = [np.asarray(__lowerCamelCase, dtype=np.floataa) for feature in input_features]
        a = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            a = [np.asarray(__lowerCamelCase, dtype=np.intaa) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            a = (
                np.array(__lowerCamelCase, dtype=np.intaa)
                if self._get_padding_strategies(__lowerCamelCase, max_length=__lowerCamelCase) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            a = self.normalize(padded_inputs['''input_features'''], attention_mask=__lowerCamelCase)

        if return_tensors is not None:
            a = padded_inputs.convert_to_tensors(__lowerCamelCase)

        return padded_inputs
330
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

UpperCamelCase__: List[str] = logging.get_logger(__name__)


@add_end_docstrings(a_)
class lowerCamelCase_(a_):
    def __init__(self: int, *__lowerCamelCase: str, **__lowerCamelCase: Optional[Any]):
        '''simple docstring'''
        super().__init__(*__lowerCamelCase, **__lowerCamelCase)
        requires_backends(self, '''vision''')
        self.check_model_type(__lowerCamelCase)

    def __call__(self: int, __lowerCamelCase: Union[str, List[str], "Image.Image", List["Image.Image"]], **__lowerCamelCase: str):
        '''simple docstring'''
        return super().__call__(__lowerCamelCase, **__lowerCamelCase)

    def SCREAMING_SNAKE_CASE_(self: Any, **__lowerCamelCase: Dict):
        '''simple docstring'''
        return {}, {}, {}

    def SCREAMING_SNAKE_CASE_(self: int, __lowerCamelCase: Union[str, Any]):
        '''simple docstring'''
        a = load_image(__lowerCamelCase)
        a = image.size
        a = self.image_processor(images=__lowerCamelCase, return_tensors=self.framework)
        return model_inputs

    def SCREAMING_SNAKE_CASE_(self: str, __lowerCamelCase: Union[str, Any]):
        '''simple docstring'''
        a = self.model(**__lowerCamelCase)
        return model_outputs

    def SCREAMING_SNAKE_CASE_(self: Union[str, Any], __lowerCamelCase: Union[str, Any]):
        '''simple docstring'''
        a = model_outputs.predicted_depth
        a = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='''bicubic''', align_corners=__lowerCamelCase
        )
        a = prediction.squeeze().cpu().numpy()
        a = (output * 2_55 / np.max(__lowerCamelCase)).astype('''uint8''')
        a = Image.fromarray(__lowerCamelCase)
        a = {}
        a = predicted_depth
        a = depth
        return output_dict
330
1
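The pipeline sample in this row obfuscates transformers' depth-estimation pipeline. A hedged usage sketch of the public API; the checkpoint and image URL below are illustrative, not taken from the row:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")       # PIL image built in postprocess() above
print(result["predicted_depth"].shape)  # raw depth tensor before rescaling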
import sys


UpperCAmelCase__ = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def A(_UpperCAmelCase: str = N) -> int:
    '''simple docstring'''
    _UpperCAmelCase = -sys.maxsize - 1
    for i in range(len(_UpperCAmelCase) - 12):
        _UpperCAmelCase = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            _UpperCAmelCase = product
    return largest_product


if __name__ == "__main__":
    print(f"""{solution() = }""")
339
import requests
from bsa import BeautifulSoup


def A(_UpperCAmelCase: str, _UpperCAmelCase: dict) -> str:
    '''simple docstring'''
    _UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase, params=_UpperCAmelCase).content, 'html.parser')
    _UpperCAmelCase = soup.find('div', attrs={'class': 'gs_ri'})
    _UpperCAmelCase = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


if __name__ == "__main__":
    UpperCAmelCase__ = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
339
1
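The first sample in this row is Project Euler #8 (largest product of 13 adjacent digits), scanning every window and recomputing each product from scratch. An equivalent, more compact formulation for reference — it assumes the digit string is bound to a readable name N, which the obfuscated row references but never actually defines:

from math import prod

def solution(n: str = N) -> int:
    # max over all 13-digit windows; prod recomputes each window's product.
    return max(prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))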
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


__UpperCAmelCase = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
355
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", default=False, help="Whether the checkpoint is an encoder-only model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
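The pervasive .T in the converter follows from differing weight layouts; here is a minimal sketch of that convention (the shapes are illustrative, not read from a real checkpoint): Flax Dense layers store kernels as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features).

import numpy as np
import torch

flax_kernel = np.zeros((512, 2048), dtype=np.float32)  # (in, out), as stored in a T5X checkpoint
pt_weight = torch.from_numpy(flax_kernel.T.copy())     # (out, in), as nn.Linear expects
assert pt_weight.shape == (2048, 512)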
28
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """Construct a CamemBERT tokenizer, based on SentencePiece. Adapted from RobertaTokenizer and XLNetTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding <s>...</s> (and </s></s> between pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT, like RoBERTa, does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
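A minimal sketch of the id layout the tokenizer above maintains; the vocabulary size and piece id here are hypothetical stand-ins, since no real sentencepiece model is loaded:

fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)      # 4: every sentencepiece id shifts up by this
sp_model_size = 32000                            # hypothetical len(self.sp_model)
mask_token_id = sp_model_size + fairseq_offset   # <mask> sits just past the shifted vocab
piece_id = 120                                   # hypothetical sentencepiece piece id
final_id = fairseq_offset + piece_id             # as in _convert_token_to_id above
assert (mask_token_id, final_id) == (32004, 124)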
51
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( __lowerCamelCase : int = 3 ) -> qiskit.result.counts.Counts: if isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(__lowerCamelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _snake_case = QuantumRegister(__lowerCamelCase , '''qr''' ) _snake_case = ClassicalRegister(__lowerCamelCase , '''cr''' ) _snake_case = QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) _snake_case = number_of_qubits for i in range(__lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowerCamelCase , __lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__lowerCamelCase , __lowerCamelCase ) # simulate with 10000 shots _snake_case = Aer.get_backend('''qasm_simulator''' ) _snake_case = execute(__lowerCamelCase , __lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
288
0
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Map every reachable vertex to its parent in the BFS tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent pointers back to the source and return the path."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))  # G->C->A->B->D
    print(g.shortest_path("G"))  # G
    try:
        print(g.shortest_path("Foo"))
    except ValueError as error:
        print(error)  # No path from vertex: G to vertex: Foo
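One design note on the implementation above: list.pop(0) is O(n) per dequeue, so the search degrades toward quadratic on large graphs. A minimal alternative sketch with collections.deque keeps BFS at O(V + E):

from __future__ import annotations

from collections import deque


def bfs_parents(graph: dict[str, list[str]], source: str) -> dict[str, str | None]:
    # Same parent-pointer BFS as Graph.breadth_first_search, with O(1) popleft.
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for adjacent in graph[vertex]:
            if adjacent not in parent:
                parent[adjacent] = vertex
                queue.append(adjacent)
    return parent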
368
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers, using the closed forms."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
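A brute-force cross-check of the closed forms above (a sanity sketch, not part of the original solution); Project Euler's worked example gives 2640 for the first ten natural numbers:

def solution_brute_force(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)


assert solution_brute_force(10) == 2640 == solution(10)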
39
0