Dataset schema (column, dtype, min, max):

    code                      string (lengths)    86      54.5k
    code_codestyle            int64               0       371
    style_context             string (lengths)    87      49.2k
    style_context_codestyle   int64               0       349
    label                     int64               0       1
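A minimal sketch of reading rows with this schema through the `datasets` library; the parquet file name and split are assumptions for illustration, not taken from this dump:

# Hypothetical: inspect one row of a dataset with the schema above.
# "train.parquet" and the split name are illustrative assumptions.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="train.parquet", split="train")
row = ds[0]
print(row["code"][:80])                # obfuscated Python source, 86..54.5k chars
print(row["code_codestyle"])           # style id in 0..371
print(row["style_context"][:80])       # companion source string, 87..49.2k chars
print(row["style_context_codestyle"])  # style id in 0..349
print(row["label"])                    # binary label, 0 or 1

The rows below follow the column order above: a `code` string, its `code_codestyle` value, a `style_context` string, its `style_context_codestyle` value, and the `label`.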
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a=1_3 , _a=1_0 , _a=3 , _a=2 , _a=2 , _a=True , _a=True , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_0 , _a=0.02 , _a="divided_space_time" , _a=None , ) -> List[str]: _a : Tuple = parent _a : str = batch_size _a : Optional[int] = image_size _a : int = num_channels _a : Any = patch_size _a : List[str] = num_frames _a : int = is_training _a : Optional[int] = use_labels _a : Union[str, Any] = hidden_size _a : Union[str, Any] = num_hidden_layers _a : List[str] = num_attention_heads _a : Optional[int] = intermediate_size _a : List[str] = hidden_act _a : List[str] = hidden_dropout_prob _a : List[Any] = attention_probs_dropout_prob _a : Union[str, Any] = attention_type _a : Any = initializer_range _a : Any = scope _a : List[Any] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token _a : Any = (image_size // patch_size) ** 2 _a : List[str] = (num_frames) * self.num_patches_per_frame + 1 def __lowercase ( self ) -> Dict: _a : str = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) _a : str = None if self.use_labels: _a : List[str] = ids_tensor([self.batch_size] , self.num_labels ) _a : Tuple = self.get_config() return config, pixel_values, labels def __lowercase ( self ) -> Optional[Any]: _a : Any = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) _a : List[Any] = self.num_labels return config def __lowercase ( self , _a , _a , _a ) -> int: _a : List[str] = TimesformerModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _a : List[str] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self , _a , _a , _a ) -> Tuple: _a : Union[str, Any] = TimesformerForVideoClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _a : str = model(lowerCAmelCase_ ) # verify the logits shape _a : Tuple = torch.Size((self.batch_size, self.num_labels) ) 
self.parent.assertEqual(result.logits.shape , lowerCAmelCase_ ) def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.prepare_config_and_inputs() _a , _a , _a : List[Any] = config_and_inputs _a : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Any = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () UpperCAmelCase__ : List[str] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) UpperCAmelCase__ : Any = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Tuple = False def __lowercase ( self ) -> Optional[Any]: _a : Any = TimesformerModelTester(self ) _a : str = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 ) def __lowercase ( self , _a , _a , _a=False ) -> Optional[Any]: _a : Optional[Any] = copy.deepcopy(lowerCAmelCase_ ) if return_labels: if model_class in get_values(lowerCAmelCase_ ): _a : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def __lowercase ( self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __lowercase ( self ) -> Tuple: pass def __lowercase ( self ) -> List[str]: _a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Optional[int] = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def __lowercase ( self ) -> str: _a , _a : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Dict = model_class(lowerCAmelCase_ ) _a : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Tuple = [*signature.parameters.keys()] _a : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def __lowercase ( self ) -> List[Any]: _a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def __lowercase ( self ) -> Tuple: _a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase_ ) @slow def __lowercase ( self ) -> Union[str, Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Union[str, Any] = TimesformerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def __lowercase ( self ) -> Optional[int]: if not self.has_attentions: pass else: _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common() _a : Any = True for model_class in self.all_model_classes: _a : Union[str, Any] = self.model_tester.seq_length _a : List[Any] = self.model_tester.num_frames _a : Dict = True _a : List[str] = False _a : Union[str, Any] = True _a : Any = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a : List[Any] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a : List[str] = outputs.attentions 
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _a : List[Any] = True _a : Optional[int] = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a : List[Any] = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) _a : List[str] = len(lowerCAmelCase_ ) # Check attention is always last and order is fine _a : Dict = True _a : int = True _a : Optional[int] = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a : Tuple = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) ) _a : Optional[int] = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __lowercase ( self ) -> List[Any]: def check_hidden_states_output(_a , _a , _a ): _a : List[str] = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a : List[str] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a : Union[str, Any] = outputs.hidden_states _a : Union[str, Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _a : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Any = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a : str = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __UpperCAmelCase ( ) -> int: """simple docstring""" _a : int = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' ) _a : Union[str, Any] = np.load(snake_case__ ) return list(snake_case__ ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __lowercase ( self ) -> Union[str, Any]: _a : int = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( lowerCAmelCase_ ) _a : Any = self.default_image_processor _a : Any = prepare_video() _a : Any = image_processor(video[:8] , return_tensors='''pt''' ).to(lowerCAmelCase_ ) # forward pass with 
torch.no_grad(): _a : Any = model(**lowerCAmelCase_ ) # verify the logits _a : Dict = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _a : List[str] = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
code_codestyle: 364
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 15
label: 0
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # positions where both operands have a 1 bit
        first ^= second         # sum of the bits, ignoring carries
        second = carry << 1     # propagate the carries one position left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
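A few illustrative sanity checks for the reconstructed `add`; the test values are editorial additions, not part of the dataset row:

# Illustrative checks for the bitwise add above (non-negative inputs only).
assert add(3, 5) == 8
assert add(13, 5) == 18
assert add(0, 7) == 7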
code_codestyle: 365
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
style_context_codestyle: 15
label: 0
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for simulating a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run a random walk of `steps` transitions and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
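A short usage sketch for the chain above; the transition table and step count are invented for illustration:

# Hypothetical usage of get_transitions with a two-node chain.
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visits = get_transitions("a", transitions, 1_000)
print(visits)  # visit counts per node; "a" should dominate the walk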
code_codestyle: 366
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
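Two illustrative checks for `mean`, covering the normal case and the empty-list error path (the values are editorial):

# Illustrative checks for mean().
assert mean([3, 6, 9]) == 6.0
try:
    mean([])
except ValueError as err:
    assert str(err) == "List is empty"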
style_context_codestyle: 15
label: 0
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def __UpperCAmelCase ( __a : int ) -> List[str]: """simple docstring""" _a : List[str] = os.path.join(args.tf_model_dir ,'''parameters.json''' ) _a : Optional[int] = json.loads(open(lowerCamelCase_ ).read() ) if not params: raise ValueError( F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith('''.pt''' ): _a : List[str] = args.output + '.pt' _a : Tuple = OrderedDict() with tf.device('''/CPU:0''' ): _a : Any = tf.train.load_checkpoint(args.tf_model_dir ) _a : List[str] = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _a : Union[str, Any] = reader.get_tensor(lowerCamelCase_ ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): _a : Optional[int] = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): _a : int = 8 _a : List[Any] = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _a : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : List[Any] = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/moe''' ): _a : List[str] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): _a : Union[str, Any] = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player _a : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : Any = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/softmlp/kernel''' ): _a : Any = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player _a : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : List[str] = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): _a : List[Any] = key_name[-9:-7] for i in range(16 ): _a : Tuple = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer) _a : List[str] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _a : Optional[int] = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/mlp''' ): _a : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): _a : Dict = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player _a : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : Any = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/p1/bias''' ): _a : Optional[int] = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player _a : int = vnp.copy() # same because it is one dimensional _a : Optional[int] = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/p2/kernel''' ): _a : List[str] = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player _a : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : Union[str, Any] = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/p2/bias''' ): _a : Tuple = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player _a : Optional[int] = vnp.copy() # same because it is one dimensional _a : int = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/ln''' ): _a : Union[str, Any] = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _a : 
List[str] = 'model.blocks.%d.feed_forward.norm.bias' % player _a : Optional[Any] = vnp.copy() # same because it is one dimensional _a : Dict = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/g''' ): _a : Tuple = 'model.blocks.%d.feed_forward.norm.weight' % player _a : str = vnp.copy() # same because it is one dimensional _a : str = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/att''' ): _a : Dict = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): _a : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _a : Union[str, Any] = state[:, 0, :, :] _a : str = state[:, 1, :, :] _a : Any = state[:, 2, :, :] _a : Optional[int] = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a : Union[str, Any] = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a : Optional[int] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a : Optional[Any] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player _a : Any = torch.tensor(lowerCamelCase_ ) _a : Tuple = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player _a : Optional[int] = torch.tensor(lowerCamelCase_ ) _a : Optional[int] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player _a : Dict = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/o/kernel''' ): _a : Optional[Any] = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player _a : Tuple = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _a : Optional[Any] = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/an''' ): _a : Tuple = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _a : Optional[Any] = 'model.blocks.%d.self_attn.norm.bias' % player _a : Dict = vnp.copy() # same because it is one dimensional _a : Optional[int] = torch.tensor(lowerCamelCase_ ) elif key_name.endswith('''/g''' ): _a : Optional[Any] = 'model.blocks.%d.self_attn.norm.weight' % player _a : int = vnp.copy() # same because it is one dimensional _a : List[str] = torch.tensor(lowerCamelCase_ ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): _a : Optional[int] = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[ key_name[-3:] ] _a : str = 'model.%s.weight' % nlayer _a : Optional[Any] = vnp.copy() # same in embedded _a : List[str] = torch.tensor(lowerCamelCase_ ) if key_name.startswith('''model/wte''' ): _a : Dict = 'lm_head.weight' _a : Optional[int] = vnp.copy() # same in embedded _a : Tuple = torch.tensor(lowerCamelCase_ ) elif key_name.startswith('''model/wob''' ): _a : Union[str, Any] = 'final_logits_bias' _a : Tuple = vnp.copy() # same in embedded _a : Union[str, Any] = state.reshape((1, -1) ) _a : List[Any] = torch.tensor(lowerCamelCase_ ) elif key_name == "model/dense/kernel": _a : Dict = 'model.last_project.weight' _a : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a : Optional[int] = torch.tensor(lowerCamelCase_ ) elif key_name == "model/dense_1/bias": _a : Optional[Any] = 'model.last_project.bias' _a : Optional[Any] = vnp.copy() # same because it is one 
dimensional _a : Optional[int] = torch.tensor(lowerCamelCase_ ) torch.save(lowerCamelCase_ ,args.output ) if __name__ == "__main__": a__ = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') a__ = parser.parse_args() convert_tf_gptsan_to_pt(args)
code_codestyle: 367
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
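For clarity, the state-dict rename the converter performs, shown in isolation; the tensor shape is an arbitrary placeholder:

# Illustrative: the single key rename done by convert_dialogpt_checkpoint.
import torch

d = {"lm_head.decoder.weight": torch.zeros(2, 2)}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in d and "lm_head.weight" in d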
style_context_codestyle: 15
label: 0
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class UpperCAmelCase_ ( __a ): """simple docstring""" def __init__( self ) -> Dict: _a : List[str] = [] def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[Any]: self.events.append('''on_init_end''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> int: self.events.append('''on_train_begin''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[Any]: self.events.append('''on_train_end''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Dict: self.events.append('''on_epoch_begin''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> str: self.events.append('''on_epoch_end''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[Any]: self.events.append('''on_step_begin''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> List[Any]: self.events.append('''on_step_end''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[Any]: self.events.append('''on_evaluate''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Dict: self.events.append('''on_predict''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[int]: self.events.append('''on_save''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> List[Any]: self.events.append('''on_log''' ) def __lowercase ( self , _a , _a , _a , **_a ) -> Optional[int]: self.events.append('''on_prediction_step''' ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> str: _a : Optional[Any] = tempfile.mkdtemp() def __lowercase ( self ) -> List[str]: shutil.rmtree(self.output_dir ) def __lowercase ( self , _a=0 , _a=0 , _a=6_4 , _a=6_4 , _a=None , _a=False , **_a ) -> Union[str, Any]: _a : Optional[int] = RegressionDataset(length=UpperCamelCase__ ) _a : str = RegressionDataset(length=UpperCamelCase__ ) _a : Tuple = RegressionModelConfig(a=UpperCamelCase__ , b=UpperCamelCase__ ) _a : int = RegressionPreTrainedModel(UpperCamelCase__ ) _a : Optional[Any] = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase__ , report_to=[] , **UpperCamelCase__ ) return Trainer( UpperCamelCase__ , UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , callbacks=UpperCamelCase__ , ) def __lowercase ( self , _a , _a ) -> Any: self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) # Order doesn't matter _a : Optional[Any] = sorted(UpperCamelCase__ , key=lambda _a : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ ) _a : int = sorted(UpperCamelCase__ , key=lambda _a : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ ) for cba, cba in zip(UpperCamelCase__ , UpperCamelCase__ ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , cba.__class__ ) elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , 
UpperCamelCase__ ): self.assertEqual(cba.__class__ , UpperCamelCase__ ) else: self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def __lowercase ( self , _a ) -> List[Any]: _a : List[Any] = ['''on_init_end''', '''on_train_begin'''] _a : int = 0 _a : Optional[Any] = len(trainer.get_eval_dataloader() ) _a : str = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(UpperCamelCase__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def __lowercase ( self ) -> List[str]: _a : List[Any] = self.get_trainer() _a : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # Callbacks passed at init are added to the default callbacks _a : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _a : int = self.get_trainer(disable_tqdm=UpperCamelCase__ ) _a : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) def __lowercase ( self ) -> int: _a : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _a : Tuple = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(UpperCamelCase__ ) expected_callbacks.remove(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) _a : str = self.get_trainer() _a : List[Any] = trainer.pop_callback(UpperCamelCase__ ) self.assertEqual(cb.__class__ , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) trainer.add_callback(UpperCamelCase__ ) expected_callbacks.insert(0 , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) # We can also add, pop, or remove by instance _a : int = self.get_trainer() _a : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(UpperCamelCase__ ) expected_callbacks.remove(UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) _a : Dict = self.get_trainer() _a : Optional[Any] = trainer.callback_handler.callbacks[0] _a : Any = trainer.pop_callback(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) trainer.add_callback(UpperCamelCase__ ) expected_callbacks.insert(0 , UpperCamelCase__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ ) def __lowercase ( self ) -> Dict: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not 
relevant to what's being tested warnings.simplefilter(action='''ignore''' , category=UpperCamelCase__ ) _a : int = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _a : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # Independent log/save/eval _a : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _a : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _a : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _a : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _a : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' ) trainer.train() _a : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) _a : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' ) trainer.train() _a : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # A bit of everything _a : Dict = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='''steps''' , ) trainer.train() _a : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: _a : str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(UpperCamelCase__ ) in warn_mock.call_args[0][0]
code_codestyle: 368
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
style_context_codestyle: 15
label: 0
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings, in [0.0, 1.0]."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not reused
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
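A few illustrative properties of `jaro_winkler`; these checks are editorial additions, not asserted in the row:

# Identical strings score 1.0; similarity decreases as strings diverge.
assert jaro_winkler("hello", "hello") == 1.0
assert jaro_winkler("hello", "hella") > jaro_winkler("hello", "world")
assert jaro_winkler("hello", "world") < 0.5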
code_codestyle: 369
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
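A hedged sketch of driving the command programmatically; the config file path is an assumption for illustration:

# Hypothetical programmatic invocation of the accelerate test command.
parser = test_command_parser()
args = parser.parse_args(["--config_file", "default_config.yaml"])
test_command(args)  # launches accelerate's bundled test script as a subprocess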
style_context_codestyle: 15
label: 0
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step process: recover the noisy image that generates a given image."""
        # Only works with a DDIM scheduler, as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    """
    Constructs a ViLT processor which wraps a ViLT image processor and a BERT tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
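A minimal usage sketch (my addition, not from the original file; "dandelin/vilt-b32-finetuned-vqa" is one public checkpoint, and the blank image is a stand-in for a real photo):

from PIL import Image
from transformers import ViltProcessor

# assumes network access to the Hugging Face Hub
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384))
inputs = processor(image, "How many cats are there?", return_tensors="pt")
print(inputs.keys())  # input_ids, token_type_ids, attention_mask, pixel_values, pixel_mask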
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
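Why `total + 4 * odd**2 - 6 * even` works (an editorial note, not in the original file): ring i of the spiral has side length odd = 2i + 1, and its four corners are odd**2, odd**2 - even, odd**2 - 2*even and odd**2 - 3*even, which sum to 4*odd**2 - 6*even. A brute-force cross-check (hypothetical helper, assuming the reconstruction above):

def corners_sum(n: int = 1001) -> int:
    # Enumerate the four corner values of each ring explicitly.
    total = 1
    for i in range(1, (n + 1) // 2):
        side = 2 * i + 1
        top_right = side * side
        total += sum(top_right - k * (side - 1) for k in range(4))
    return total


assert corners_sum(5) == 101
assert corners_sum(1001) == solution(1001)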
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : int=None ,__a : List[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default ,metadata=__a ) @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) UpperCAmelCase__ : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) UpperCAmelCase__ : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Use FP16 to accelerate inference."} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Benchmark training of model"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Verbose memory tracing"} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Trace memory line by line"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save result to a CSV file"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save all print statements in a log file"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Whether to print environment information"} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) UpperCAmelCase__ : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) UpperCAmelCase__ : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) UpperCAmelCase__ : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) UpperCAmelCase__ : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) UpperCAmelCase__ : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) UpperCAmelCase__ : str = field( default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) UpperCAmelCase__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __lowercase ( self ) -> Tuple: warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , _a , ) def __lowercase ( self ) -> List[str]: return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __lowercase ( self ) -> List[str]: if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def __lowercase ( self ) -> Union[str, Any]: if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING a__ = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class UpperCAmelCase_ ( lowercase_ ): """simple docstring""" def __init__( self , **_a ) -> Any: super().__init__(**a__ ) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type(a__ ) def __call__( self , _a , _a = None , **_a , ) -> Optional[int]: if "text_queries" in kwargs: _a : str = kwargs.pop('''text_queries''' ) if isinstance(a__ , (str, Image.Image) ): _a : int = {'''image''': image, '''candidate_labels''': candidate_labels} else: _a : int = image _a : Optional[int] = super().__call__(a__ , **a__ ) return results def __lowercase ( self , **_a ) -> int: _a : Union[str, Any] = {} if "threshold" in kwargs: _a : Union[str, Any] = kwargs['''threshold'''] if "top_k" in kwargs: _a : Optional[int] = kwargs['''top_k'''] return {}, {}, postprocess_params def __lowercase ( self , _a ) -> int: _a : int = load_image(inputs['''image'''] ) _a : Optional[int] = inputs['''candidate_labels'''] if isinstance(a__ , a__ ): _a : List[Any] = candidate_labels.split(''',''' ) _a : Any = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(a__ ): _a : str = self.tokenizer(a__ , return_tensors=self.framework ) _a : Optional[Any] = self.image_processor(a__ , return_tensors=self.framework ) yield { "is_last": i == len(a__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __lowercase ( self , _a ) -> Union[str, Any]: _a : Tuple = model_inputs.pop('''target_size''' ) _a : Optional[int] = model_inputs.pop('''candidate_label''' ) _a : Any = model_inputs.pop('''is_last''' ) _a : str = self.model(**a__ ) _a : Dict = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs} return model_outputs def __lowercase ( self , _a , _a=0.1 , _a=None ) -> Tuple: _a : str = [] for model_output in model_outputs: _a : Union[str, Any] = model_output['''candidate_label'''] _a : Dict = BaseModelOutput(a__ ) _a : Union[str, Any] = self.image_processor.post_process_object_detection( outputs=a__ , threshold=a__ , target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): _a : str = outputs['''scores'''][index].item() _a : int = self._get_bounding_box(outputs['''boxes'''][index][0] ) _a : Tuple = {'''score''': score, '''label''': label, '''box''': box} results.append(a__ ) _a : Tuple = sorted(a__ , key=lambda _a : x["score"] , reverse=a__ ) if top_k: _a : List[str] = results[:top_k] return results def __lowercase ( self , _a ) -> Dict[str, int]: if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) _a , _a , _a , _a : Optional[Any] = box.int().tolist() _a : Tuple = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version a__ = get_logger(__name__) class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : Tuple = '''dummy_data''' UpperCAmelCase__ : List[str] = '''datasets''' UpperCAmelCase__ : Dict = False def __init__( self , _a , _a , _a , _a = None , _a = False , _a = True , _a = None , ) -> Tuple: _a : Optional[int] = 0 _a : str = dataset_name _a : Any = cache_dir _a : int = use_local_dummy_data _a : Optional[int] = config # download_callbacks take a single url as input _a : Any = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _a : List[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _a : str = str(UpperCamelCase_ ) # to be downloaded _a : List[str] = None _a : int = None @property def __lowercase ( self ) -> Optional[int]: if self._dummy_file is None: _a : Optional[int] = self.download_dummy_data() return self._dummy_file @property def __lowercase ( self ) -> Dict: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __lowercase ( self ) -> List[Any]: return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __lowercase ( self ) -> Tuple: _a : Optional[Any] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _a : List[Any] = cached_path( UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ ) return os.path.join(UpperCamelCase_ , self.dummy_file_name ) @property def __lowercase ( self ) -> str: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __lowercase ( self ) -> List[Any]: if self._bucket_url is None: _a : int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __lowercase ( self ) -> List[str]: if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __lowercase ( self , _a , *_a ) -> Tuple: if self.load_existing_dummy_data: # dummy data is downloaded and tested _a : List[Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _a : int = self.dummy_file_name # special case when data_url is a dict if isinstance(UpperCamelCase_ , UpperCamelCase_ ): return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , (list, tuple) ): return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ ) else: return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ ) def __lowercase ( self , _a , *_a ) -> List[Any]: return self.download_and_extract(UpperCamelCase_ ) def __lowercase ( self , _a , _a ) -> List[Any]: return self.download_and_extract(UpperCamelCase_ ) def __lowercase ( self , _a , *_a , **_a ) -> Tuple: return path def __lowercase ( self ) -> Tuple: return {} def __lowercase ( self , _a , _a ) -> List[str]: _a : Optional[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): for single_url in single_urls: download_callback(UpperCamelCase_ ) else: _a : List[Any] = single_urls download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(UpperCamelCase_ , UpperCamelCase_ ): _a : Tuple = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls] else: _a : List[str] = single_urls _a : int = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) _a : Tuple = value # make sure that values are unique if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _a : int = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __lowercase ( self , _a , _a ) -> Union[str, Any]: _a : List[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _a : int = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , UpperCamelCase_ ) ) for url in data_url ) _a : Union[str, Any] = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _a : Dict = [data_url[0]] * len(UpperCamelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a : List[Any] = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(UpperCamelCase_ ) return dummy_data_list def __lowercase ( self , _a , _a ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a : Tuple = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] 
) ) if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __lowercase ( self ) -> Dict: pass def __lowercase ( self ) -> Optional[Any]: pass def __lowercase ( self , _a ) -> List[str]: def _iter_archive_members(_a ): # this preserves the order of the members inside the ZIP archive _a : List[Any] = Path(self.dummy_file ).parent _a : Tuple = path.relative_to(UpperCamelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _a : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(UpperCamelCase_ ) _a : List[str] = Path(UpperCamelCase_ ) _a : Optional[Any] = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open('''rb''' ) def __lowercase ( self , _a ) -> Optional[int]: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): _a : Tuple = [paths] for path in paths: if os.path.isfile(UpperCamelCase_ ): if os.path.basename(UpperCamelCase_ ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ): if os.path.basename(UpperCamelCase_ ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(UpperCamelCase_ ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a__ = random.Random() def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any: """simple docstring""" if rng is None: _a : Dict = global_rng _a : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]: _a : Optional[Any] = parent _a : str = batch_size _a : List[str] = min_seq_length _a : str = max_seq_length _a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _a : List[Any] = spectrogram_length _a : List[str] = feature_size _a : List[Any] = num_audio_channels _a : Tuple = hop_length _a : Optional[int] = chunk_length _a : int = sampling_rate def __lowercase ( self ) -> Union[str, Any]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowercase ( self , _a=False , _a=False ) -> List[Any]: def _flatten(_a ): return list(itertools.chain(*_a ) ) if equal_length: _a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _a : List[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _a : str = [np.asarray(_a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = TvltFeatureExtractor def __lowercase ( self ) -> Dict: _a : List[str] = TvltFeatureExtractionTester(self ) def __lowercase ( self ) -> Any: _a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_a , '''spectrogram_length''' ) ) self.assertTrue(hasattr(_a , '''feature_size''' ) ) self.assertTrue(hasattr(_a , '''num_audio_channels''' ) ) self.assertTrue(hasattr(_a , '''hop_length''' ) ) self.assertTrue(hasattr(_a , '''chunk_length''' ) ) self.assertTrue(hasattr(_a , '''sampling_rate''' ) ) def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : int = feat_extract_first.save_pretrained(_a )[0] check_json_file_has_correct_format(_a ) _a : Dict = self.feature_extraction_class.from_pretrained(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Union[str, Any] = feat_extract_second.to_dict() _a : Any = dict_first.pop('''mel_filters''' ) _a : int = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) 
-> Optional[int]: _a : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Optional[int] = os.path.join(_a , '''feat_extract.json''' ) feat_extract_first.to_json_file(_a ) _a : List[str] = self.feature_extraction_class.from_json_file(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Dict = feat_extract_second.to_dict() _a : str = dict_first.pop('''mel_filters''' ) _a : str = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) -> Union[str, Any]: # Initialize feature_extractor _a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] _a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs] # Test not batched input _a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _a : Union[str, Any] = feature_extractor( _a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. _a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] _a : int = np.asarray(_a ) _a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowercase ( self , _a ) -> Optional[Any]: _a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __lowercase ( self ) -> int: _a : Union[str, Any] = self._load_datasamples(1 ) _a : int = TvltFeatureExtractor() _a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) _a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
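# A minimal usage sketch for the config above (assumes a standard `transformers`
# install; `MarkupLMModel` is the companion model class, defined elsewhere in the
# library, and the keyword values shown simply repeat the defaults):
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
model = MarkupLMModel(config)  # randomly initialized model with this configuration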
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a , _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this 
array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu a__ = False class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowercase ( self ) -> Union[str, Any]: return 1_2 @property def __lowercase ( self ) -> Union[str, Any]: return 1_2 @property def __lowercase ( self ) -> Tuple: return 3_2 @property def __lowercase ( self ) -> Dict: torch.manual_seed(0 ) _a : Any = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __lowercase ( self ) -> Optional[int]: _a : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __lowercase ( self ) -> Optional[int]: torch.manual_seed(0 ) _a : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(_A ) @property def __lowercase ( self ) -> Dict: torch.manual_seed(0 ) _a : Any = 1_2 _a : int = 1_2 _a : List[Any] = { 'attention_bias': True, 'cross_attention_dim': 3_2, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 3_2, 'sample_size': width, 'activation_fn': 'geglu-approximate', } _a : Union[str, Any] = TransformeraDModel(**_A ) return model def __lowercase ( self ) -> Dict: _a : Tuple = 'cpu' _a : List[str] = self.dummy_vqvae _a : str = self.dummy_text_encoder _a : Optional[Any] = self.dummy_tokenizer _a : Dict = self.dummy_transformer _a : Optional[int] = VQDiffusionScheduler(self.num_embed ) _a : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_A ) _a : List[Any] = VQDiffusionPipeline( vqvae=_A , text_encoder=_A , tokenizer=_A , transformer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , ) _a : List[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _a : Optional[Any] = 'teddy bear playing in the pool' _a : str = torch.Generator(device=_A ).manual_seed(0 ) _a : Union[str, Any] = pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='''np''' ) _a : Optional[int] = output.images _a : int = torch.Generator(device=_A ).manual_seed(0 ) _a : Tuple = pipe( [prompt] , generator=_A , output_type='''np''' , return_dict=_A , num_inference_steps=2 )[0] _a : str = image[0, -3:, -3:, -1] _a : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _a : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( 
self ) -> Optional[int]: _a : Optional[Any] = 'cpu' _a : Optional[int] = self.dummy_vqvae _a : List[str] = self.dummy_text_encoder _a : Optional[int] = self.dummy_tokenizer _a : Optional[Any] = self.dummy_transformer _a : Union[str, Any] = VQDiffusionScheduler(self.num_embed ) _a : Optional[int] = LearnedClassifierFreeSamplingEmbeddings( learnable=_A , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) _a : Union[str, Any] = VQDiffusionPipeline( vqvae=_A , text_encoder=_A , tokenizer=_A , transformer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , ) _a : Union[str, Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _a : Union[str, Any] = 'teddy bear playing in the pool' _a : Optional[int] = torch.Generator(device=_A ).manual_seed(0 ) _a : Tuple = pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='''np''' ) _a : Optional[Any] = output.images _a : str = torch.Generator(device=_A ).manual_seed(0 ) _a : Dict = pipe( [prompt] , generator=_A , output_type='''np''' , return_dict=_A , num_inference_steps=2 )[0] _a : int = image[0, -3:, -3:, -1] _a : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _a : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self ) -> Optional[int]: _a : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) _a : Union[str, Any] = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) _a : Tuple = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though _a : Optional[int] = torch.Generator(device=_A ).manual_seed(0 ) _a : int = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_A , output_type='''np''' , ) _a : int = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow a__ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) a__ = logging.getLogger() def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _a : Any = argparse.ArgumentParser() parser.add_argument('''-f''' ) _a : Dict = parser.parse_args() return args.f def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any: """simple docstring""" _a : Any = os.path.join(__a ,F"""{split}_results.json""" ) if os.path.exists(__a ): with open(__a ,'''r''' ) as f: return json.load(__a ) raise ValueError(F"""can't find {path}""" ) a__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> str: _a : Any = self.get_auto_remove_tmp_dir() _a : Optional[Any] = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_glue.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def __lowercase ( self ) -> Dict: _a : Tuple = self.get_auto_remove_tmp_dir() _a : Tuple = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_clm_flax.main() _a : List[str] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 1_0_0 ) @slow def __lowercase ( self ) -> Optional[int]: _a : str = self.get_auto_remove_tmp_dir() _a : Optional[int] = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(_a , '''argv''' , _a ): run_summarization_flax.main() _a : Optional[int] = get_results(_a , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def __lowercase ( self ) -> Tuple: _a : List[str] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_mlm.py --model_name_or_path 
distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(_a , '''argv''' , _a ): run_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 4_2 ) @slow def __lowercase ( self ) -> Dict: _a : Optional[Any] = self.get_auto_remove_tmp_dir() _a : int = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_ta_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def __lowercase ( self ) -> Optional[Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _a : Any = 7 if get_gpu_count() > 1 else 2 _a : List[Any] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_ner.main() _a : Dict = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def __lowercase ( self ) -> Any: _a : Optional[int] = self.get_auto_remove_tmp_dir() _a : Union[str, Any] = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(_a , '''argv''' , _a ): run_qa.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_f1'''] , 3_0 ) self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import argparse import os import re import packaging.version a__ = '''examples/''' a__ = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } a__ = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } a__ = '''README.md''' def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int: """simple docstring""" with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Tuple = f.read() _a , _a : str = REPLACE_PATTERNS[pattern] _a : List[str] = replace.replace('''VERSION''' ,__a ) _a : List[Any] = re_pattern.sub(__a ,__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.write(__a ) def __UpperCAmelCase ( __a : Any ) -> List[Any]: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' ) def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a ,__a ,__a ) if not patch: update_version_in_examples(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" _a : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' _a : str = '''1. Want to contribute a new model?''' with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Optional[int] = f.readlines() # Find the start of the list. _a : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _a : List[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): _a : Tuple = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,) index += 1 with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] ,'''r''' ) as f: _a : Optional[Any] = f.read() _a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def __UpperCAmelCase ( __a : Dict=False ) -> str: """simple docstring""" _a : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: _a : List[Any] = default_version.base_version elif patch: _a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _a : Dict = input(F"""Which version are you releasing? 
[{default_version}]""" ) if len(__a ) == 0: _a : int = default_version print(F"""Updating version to {version}.""" ) global_version_update(__a ,patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" _a : str = get_version() _a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _a : List[Any] = current_version.base_version # Check with the user we got that right. _a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(__a ) == 0: _a : List[str] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') a__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    """Disable gradient tracking for every parameter of a module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
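# A hedged alternative sketch: the same index can be found without materializing huge
# Fibonacci numbers. By Binet's formula, F(i) has about i*log10(phi) - log10(5)/2 + 1
# digits, so the first index with n digits is the ceiling below. This matches the
# iterative `fibonacci_digits_index` above for n >= 2 (the approximation error is far
# below 1 at these sizes).
import math


def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))


# F(12) = 144 is the first Fibonacci number with 3 digits.
assert fibonacci_digits_index_closed_form(3) == 12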
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Highest Response Ratio Next: compute the turn-around time of each process."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        loc = 0  # Index showing the location of the process being performed
        response_ratio = 0  # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Compute the waiting time of each process from its turn-around time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record a__ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' a__ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' a__ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', 
\'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]: """simple docstring""" return float((preds == labels).mean() ) def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]: """simple docstring""" _a : List[str] = simple_accuracy(__a ,__a ) _a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) ) return { "accuracy": acc, "f1": fa, } def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]: """simple docstring""" _a : Union[str, Any] = {} for id_pred, label in zip(__a ,__a ): _a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" _a : Optional[Any] = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _a : str = [(pred, label)] _a , _a : Any = [], [] for question, preds_labels in question_map.items(): _a , _a : Any = zip(*__a ) _a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' ) fas.append(__a ) _a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) ) ems.append(__a ) _a : List[str] = float(sum(__a ) / len(__a ) ) _a : str = sum(__a ) / len(__a ) _a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __lowercase ( self ) -> Any: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __lowercase ( self , _a , _a ) -> Optional[Any]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_a , _a )} elif self.config_name == "cb": return acc_and_fa(_a , _a , fa_avg='''macro''' ) elif self.config_name == "record": _a : Any = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for 
ans in ref['''answers''']]} for ref in references ] } ] _a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(_a , _a )[0] elif self.config_name == "multirc": return evaluate_multirc(_a , _a ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_a , _a )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
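# A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562's
# module-level `__getattr__`. This is illustrative only: the real `_LazyModule` in
# `transformers.utils` is a ModuleType subclass with more bookkeeping, and the
# `_demo_import_structure` mapping here is a made-up stand-in.
import importlib

_demo_import_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    # Resolve the attribute to its owning module on first access, importing lazily.
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")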
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Power iteration: estimate the largest eigenvalue and its eigenvector."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
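# A tiny additional sanity check for `power_iteration` above: for a diagonal matrix the
# dominant eigenvalue is simply the largest diagonal entry, so convergence is easy to
# verify by eye.
import numpy as np

value, vec = power_iteration(np.diag([1.0, 5.0, 3.0]), np.array([1.0, 1.0, 1.0]))
assert abs(value - 5.0) <= 1e-6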
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
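# A quick round-trip sketch of the conversion `_generate_tables` performs above: a
# pickled DataFrame read back with pandas and converted to an Arrow table. The
# temporary path is illustrative only (NamedTemporaryFile reopening assumes a
# POSIX-like platform).
import tempfile

import pandas as pd
import pyarrow as pa


def _demo_pickle_to_arrow() -> pa.Table:
    df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
    with tempfile.NamedTemporaryFile(suffix=".pkl") as tmp:
        df.to_pickle(tmp.name)
        # Same call chain as the builder: pickle -> pandas -> Arrow
        return pa.Table.from_pandas(pd.read_pickle(tmp.name))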
15
0
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ = logging.get_logger(__name__) a__ = { '''huggingface/time-series-transformer-tourism-monthly''': ( '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json''' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class UpperCAmelCase_ ( _snake_case ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "time_series_transformer" UpperCAmelCase__ : str = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 3_2 , _a = 3_2 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 6_4 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 1_0_0 , _a = 0.02 , _a=True , **_a , ) -> Optional[int]: _a : Optional[int] = prediction_length _a : Tuple = context_length or prediction_length _a : Union[str, Any] = distribution_output _a : Any = loss _a : Optional[Any] = input_size _a : Optional[int] = num_time_features _a : Tuple = lags_sequence _a : Any = scaling _a : int = num_dynamic_real_features _a : Dict = num_static_real_features _a : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCamelCase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) _a : int = cardinality else: _a : Tuple = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCamelCase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) _a : Tuple = embedding_dimension else: _a : Dict = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _a : str = num_parallel_samples # Transformer architecture configuration _a : Tuple = input_size * len(UpperCamelCase__ ) + self._number_of_features _a : Union[str, Any] = d_model _a : str = encoder_attention_heads _a : str = decoder_attention_heads _a : Dict = encoder_ffn_dim _a : List[str] = decoder_ffn_dim _a : List[Any] = encoder_layers _a : Union[str, Any] = decoder_layers _a : Optional[int] = dropout _a : int = attention_dropout _a : Any = activation_dropout _a : Optional[Any] = encoder_layerdrop _a : Union[str, Any] = decoder_layerdrop _a : Tuple = activation_function _a : Union[str, Any] = init_std _a : Union[str, Any] = use_cache super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ ) @property def __lowercase ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
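# Hedged usage sketch (added for illustration, not part of the configuration
# module): constructing the config for a monthly forecasting setup; the
# hyperparameter values below are assumptions, not defaults from the file above.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3],
    num_time_features=2,
)
print(config.prediction_length, config.context_length)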
362
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return ``base**exponent % modulo_value`` via exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last ``digits`` digits of the hyperexponentiation
    (tetration) tower base↑↑height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
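# Added sanity check (illustrative, not in the original solution): _modexpt is
# square-and-multiply modular exponentiation, so it must agree with Python's
# built-in three-argument pow.
assert _modexpt(3, 4, 10) == pow(3, 4, 10) == 1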
15
0
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Dict: _a : Union[str, Any] = inspect.getfile(accelerate.test_utils ) _a : List[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 _a : List[Any] = test_metrics @require_cpu def __lowercase ( self ) -> Optional[Any]: debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __lowercase ( self ) -> Optional[Any]: debug_launcher(self.test_metrics.main ) @require_single_gpu def __lowercase ( self ) -> List[Any]: self.test_metrics.main() @require_multi_gpu def __lowercase ( self ) -> Dict: print(F"""Found {torch.cuda.device_count()} devices.""" ) _a : int = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_A , env=os.environ.copy() )
363
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging a__ = '''\ ''' a__ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' a__ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _a : List[str] = '''cuda''' else: _a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' _a : Dict = AutoModelForCausalLM.from_pretrained(_a ) _a : List[Any] = model.to(_a ) _a : List[str] = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _a : str = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _a : List[Any] = model.config.max_length - 1 else: _a : List[str] = model.config.max_length _a : Union[str, Any] = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a ) _a : List[Any] = encodings['''input_ids'''] _a : int = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _a : Optional[int] = [] _a : Dict = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): _a : Dict = min(start_index + batch_size , len(_a ) ) _a : Union[str, Any] = encoded_texts[start_index:end_index] _a : int = attn_masks[start_index:end_index] if add_start_token: _a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) _a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _a : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) _a : Dict = encoded_batch with torch.no_grad(): _a : Any = model(_a , attention_mask=_a ).logits _a : List[str] = out_logits[..., :-1, :].contiguous() _a : Union[str, Any] = labels[..., 1:].contiguous() _a : Optional[int] = attn_mask[..., 1:].contiguous() _a : Union[str, Any] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
15
0
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __a : list[int] ) -> list[int]: """simple docstring""" if len(snake_case__ ) == 0: return array _a : int = min(snake_case__ ), max(snake_case__ ) # Compute the variables _a : Dict = _max - _min + 1 _a : Union[str, Any] = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: _a : Union[str, Any] = i - _min _a : Optional[int] = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. _a : Tuple = 0 for i in range(snake_case__ ): while holes_repeat[i] > 0: _a : int = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() a__ = input('''Enter numbers separated by comma:\n''') a__ = [int(x) for x in user_input.split(''',''')] print(pigeon_sort(unsorted))
364
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
15
0
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a=1_3 , _a=3_0 , _a=2 , _a=3 , _a=True , _a=True , _a=3_2 , _a=2 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_0 , _a=0.02 , _a=3 , _a=0.6 , _a=None , ) -> int: _a : Union[str, Any] = parent _a : Optional[Any] = batch_size _a : List[Any] = image_size _a : int = patch_size _a : Optional[int] = num_channels _a : Optional[Any] = is_training _a : Union[str, Any] = use_labels _a : Any = hidden_size _a : Tuple = num_hidden_layers _a : int = num_attention_heads _a : Optional[Any] = intermediate_size _a : Dict = hidden_act _a : int = hidden_dropout_prob _a : List[str] = attention_probs_dropout_prob _a : Tuple = type_sequence_label_size _a : Optional[int] = initializer_range _a : List[str] = mask_ratio _a : Tuple = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _a : str = (image_size // patch_size) ** 2 _a : Optional[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __lowercase ( self ) -> Optional[Any]: _a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : Union[str, Any] = None if self.use_labels: _a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase ( self ) -> Any: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __lowercase ( self , _a , _a , _a ) -> Union[str, Any]: _a : Tuple = TFViTMAEModel(config=_a ) _a : List[str] = model(_a , training=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self , _a , _a , _a ) -> str: _a : Optional[Any] = TFViTMAEForPreTraining(_a ) _a : str = model(_a , training=_a ) # expected sequence length = num_patches _a : str = (self.image_size // self.patch_size) ** 2 _a : int = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test 
greyscale images _a : str = 1 _a : str = TFViTMAEForPreTraining(_a ) _a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a : int = model(_a , training=_a ) _a : int = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __lowercase ( self ) -> List[Any]: _a : str = self.prepare_config_and_inputs() ((_a) , (_a) , (_a)) : Optional[int] = config_and_inputs _a : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCAmelCase__ : List[Any] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : int = False UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Any = False def __lowercase ( self ) -> Union[str, Any]: _a : Dict = TFViTMAEModelTester(self ) _a : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 ) def __lowercase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def __lowercase ( self ) -> str: pass def __lowercase ( self ) -> List[str]: _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Tuple = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _a : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) ) def __lowercase ( self ) -> List[Any]: _a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Dict = model_class(_a ) _a : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Any = [*signature.parameters.keys()] _a : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _a ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __lowercase ( self ) -> Any: _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_a ) def __lowercase ( self ) -> List[Any]: # make the mask reproducible np.random.seed(2 ) _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common() _a : Any = int((config.image_size // config.patch_size) ** 2 ) _a : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a : Optional[Any] = model_class(_a ) _a : int = self._prepare_for_class(_a , _a ) _a : List[str] = model(_a , noise=_a ) _a : Dict = copy.deepcopy(self._prepare_for_class(_a , _a ) ) _a : str = model(**_a , noise=_a ) _a : List[str] = outputs_dict[0].numpy() _a : Union[str, Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def __lowercase ( self ) -> Dict: # make the mask reproducible np.random.seed(2 ) _a , _a : str = self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) _a : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def 
prepare_numpy_arrays(_a ): _a : Optional[int] = {} for k, v in inputs_dict.items(): if tf.is_tensor(_a ): _a : Optional[Any] = v.numpy() else: _a : Dict = np.array(_a ) return inputs_np_dict for model_class in self.all_model_classes: _a : List[str] = model_class(_a ) _a : List[str] = self._prepare_for_class(_a , _a ) _a : Tuple = prepare_numpy_arrays(_a ) _a : Tuple = model(_a , noise=_a ) _a : int = model(**_a , noise=_a ) self.assert_outputs_same(_a , _a ) def __lowercase ( self , _a , _a , _a ) -> int: # make masks reproducible np.random.seed(2 ) _a : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) _a : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _a : Union[str, Any] = tf.constant(_a ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _a : Union[str, Any] = tf_noise super().check_pt_tf_models(_a , _a , _a ) def __lowercase ( self ) -> Tuple: # make mask reproducible np.random.seed(2 ) _a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _a : Any = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_a ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(_a , _a ),) if isinstance(_a , _a ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_a , '''_keras_serializable''' , _a ) } _a : Tuple = int((config.image_size // config.patch_size) ** 2 ) _a : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _a : Any = tf.convert_to_tensor(_a ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: _a : Optional[Any] = main_layer_class(_a ) _a : Union[str, Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } _a : Tuple = tf.keras.Model(_a , outputs=main_layer(_a ) ) _a : List[str] = model(_a ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Union[str, Any] = os.path.join(_a , '''keras_model.h5''' ) model.save(_a ) _a : Tuple = tf.keras.models.load_model( _a , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_a , tf.keras.Model ) _a : Any = model(_a ) self.assert_outputs_same(_a , _a ) @slow def __lowercase ( self ) -> Optional[int]: # make mask reproducible np.random.seed(2 ) _a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) _a : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a : str = model_class(_a ) _a : List[str] = self._prepare_for_class(_a , _a ) _a : List[str] = model(_a , noise=_a ) if model_class.__name__ == "TFViTMAEModel": _a : Any = outputs.last_hidden_state.numpy() _a : int = 0 else: _a : Dict = outputs.logits.numpy() _a : Tuple = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a , saved_model=_a ) _a : List[Any] = model_class.from_pretrained(_a ) _a : List[str] = model(_a , noise=_a ) if model_class.__name__ == "TFViTMAEModel": _a : Optional[int] = after_outputs['''last_hidden_state'''].numpy() _a : List[Any] = 0 else: _a : List[str] = 
after_outputs['''logits'''].numpy() _a : Dict = 0 _a : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_a , 1e-5 ) def __lowercase ( self ) -> Optional[Any]: # make mask reproducible np.random.seed(2 ) _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _a : Tuple = int((config.image_size // config.patch_size) ** 2 ) _a : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a : List[str] = model_class(_a ) _a : str = self._prepare_for_class(_a , _a ) _a : Optional[Any] = model(_a , noise=_a ) _a : int = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_a ) _a : Any = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config _a : Union[str, Any] = model_class.from_config(model.config ) _a : List[Any] = new_model(_a ) # Build model new_model.set_weights(model.get_weights() ) _a : Tuple = new_model(_a , noise=_a ) self.assert_outputs_same(_a , _a ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' ) def __lowercase ( self ) -> str: pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def __lowercase ( self ) -> int: pass @slow def __lowercase ( self ) -> List[Any]: _a : Union[str, Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_a ) def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _a : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self ) -> int: return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def __lowercase ( self ) -> Union[str, Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) _a : Optional[Any] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) _a : Dict = self.default_image_processor _a : int = prepare_img() _a : Dict = image_processor(images=_a , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _a : List[Any] = ViTMAEConfig() _a : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _a : List[str] = np.random.uniform(size=(1, num_patches) ) # forward pass _a : Dict = model(**_a , noise=_a ) # verify the logits _a : Optional[Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape , _a ) _a : int = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _a , atol=1e-4 )
365
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
15
0
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ = '''▁''' a__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class UpperCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : int = BertGenerationTokenizer UpperCAmelCase__ : Dict = False UpperCAmelCase__ : str = True def __lowercase ( self ) -> List[str]: super().setUp() _a : Optional[Any] = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self ) -> int: _a : Dict = '''<s>''' _a : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(__lowercase ) , 1_0_0_2 ) def __lowercase ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __lowercase ( self ) -> Optional[Any]: _a : str = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase ) _a : Optional[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) _a : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _a : Any = tokenizer.convert_tokens_to_ids(__lowercase ) self.assertListEqual( __lowercase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) _a : Tuple = tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __lowercase ( self ) -> int: return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def __lowercase ( self ) -> List[str]: _a : Optional[Any] = '''Hello World!''' _a : Optional[int] = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) ) @slow def __lowercase ( self ) -> List[str]: _a : Optional[Any] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) _a : Union[str, Any] = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) ) @require_torch @slow def __lowercase ( self ) -> Optional[int]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence _a : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] _a : Optional[int] = ''' '''.join(__lowercase ) _a : Optional[int] = self.big_tokenizer.encode_plus(__lowercase , return_tensors='''pt''' , return_token_type_ids=__lowercase ) _a : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__lowercase ) _a : List[Any] = BertGenerationConfig() _a : Optional[Any] = BertGenerationEncoder(__lowercase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__lowercase ) model(**__lowercase ) @slow def __lowercase ( self ) -> Dict: # fmt: off _a : List[str] = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
366
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of ``nums``; raise ValueError on an empty list."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
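# Added illustrative check (the function name `mean` matches the repaired
# definition above): the arithmetic mean of [3, 6, 9] is 6.0.
assert mean([3, 6, 9]) == 6.0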
15
0
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility
    (pass 0 for the unknown one), return the name and value of the third."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
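# Added usage sketch: pass 0 for the unknown quantity; with conductivity
# unknown, the function returns sigma = q * n * mu. The carrier values below
# are illustrative, SI units assumed.
name, value = electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.13)
print(name, value)  # "conductivity", 1.6021e-19 * 1e20 * 0.13 ≈ 2.08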
367
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME

DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # Load the fine-tuned checkpoint, rename the LM-head key, and re-save it
    # under the standard transformers weights filename.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
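# Added illustrative core of the conversion on an in-memory state dict (no
# real DialoGPT checkpoint is required for this sketch):
import torch

state = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state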
15
0
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
368
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
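# Hedged usage sketch (added): this class backs transformers'
# pipeline("text-generation"); an illustrative call, assuming the gpt2
# checkpoint can be downloaded:
# from transformers import pipeline
# generator = pipeline("text-generation", model="gpt2")
# print(generator("Hello, I'm a language model,", max_new_tokens=20))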
15
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( __snake_case ): """simple docstring""" def __init__( self , *_a , _a=None , _a=None , **_a ) -> Optional[int]: super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) _a : Optional[int] = eval_examples _a : Optional[Any] = post_process_function def __lowercase ( self , _a = None , _a=None , _a = None , _a = "eval" , **_a , ) -> Dict[str, float]: _a : Any = gen_kwargs.copy() _a : str = ( gen_kwargs["max_length"] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) _a : Any = ( gen_kwargs["num_beams"] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) _a : Union[str, Any] = gen_kwargs _a : int = self.eval_dataset if eval_dataset is None else eval_dataset _a : List[str] = self.get_eval_dataloader(UpperCamelCase__ ) _a : int = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _a : Any = self.compute_metrics _a : Any = None _a : Optional[int] = time.time() _a : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _a : List[str] = eval_loop( UpperCamelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: _a : List[Any] = compute_metrics _a : List[Any] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _a : int = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _a : Tuple = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _a : Any = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) else: _a : int = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _a : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ ) return metrics def __lowercase ( self , _a , _a , _a=None , _a = "test" , **_a ) -> Union[str, Any]: _a : Dict = gen_kwargs.copy() _a : List[str] = self.get_test_dataloader(UpperCamelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. 
_a : Optional[int] = self.compute_metrics _a : Optional[int] = None _a : Any = time.time() _a : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _a : int = eval_loop( UpperCamelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: _a : Tuple = compute_metrics _a : Optional[int] = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _a : List[str] = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , '''predict''' ) _a : List[Any] = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): _a : int = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
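Both the evaluate and predict overrides above start by copying `gen_kwargs` and back-filling `max_length`/`num_beams` from the training arguments. That resolution step is worth seeing in isolation; a self-contained sketch of the same precedence rule (function name and defaults are mine, not from the class above):

def resolve_gen_kwargs(gen_kwargs, default_max_length=128, default_num_beams=1):
    # Explicitly passed generation kwargs win; otherwise fall back to the
    # trainer-level defaults, mirroring the logic in evaluate()/predict().
    gen_kwargs = gen_kwargs.copy()
    gen_kwargs["max_length"] = (
        gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else default_max_length
    )
    gen_kwargs["num_beams"] = (
        gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else default_num_beams
    )
    return gen_kwargs

print(resolve_gen_kwargs({"num_beams": 4}))  # {'num_beams': 4, 'max_length': 128}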
369
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __UpperCAmelCase ( __a : Dict=None ) -> str: """simple docstring""" if subparsers is not None: _a : Union[str, Any] = subparsers.add_parser('''test''' ) else: _a : List[str] = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''' ,default=__a ,help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) ,) if subparsers is not None: parser.set_defaults(func=__a ) return parser def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]: """simple docstring""" _a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: _a : List[Any] = script_name else: _a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}""" _a : str = ['''accelerate-launch'''] + test_args.split() _a : str = execute_subprocess_async(__a ,env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! You are ready for your distributed training!''' ) def __UpperCAmelCase ( ) -> List[Any]: """simple docstring""" _a : Optional[int] = test_command_parser() _a : List[Any] = parser.parse_args() test_command(__a ) if __name__ == "__main__": main()
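In practice the parser above is registered as a subcommand of the `accelerate` entry point, so the whole file reduces to an invocation such as `accelerate test --config_file /path/to/default_config.yaml` (path illustrative); without `--config_file`, the bundled `test_script.py` is launched against the default configuration.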
15
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
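The `_LazyModule` indirection at the bottom is what keeps `import transformers` cheap: submodules listed in `_import_structure` are only imported when an attribute is first touched. A simplified standalone sketch of that idea (this is not the actual `_LazyModule` implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    # Resolve attributes from submodules on first access instead of at import time.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache so __getattr__ only fires once
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

# Usage sketch: the json module is only imported when .dumps is first accessed.
lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))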
370
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = tempfile.mkdtemp() # fmt: off _a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _a : str = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __lowercase ( self , **_a ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> str: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self ) -> str: _a : List[str] = self.get_tokenizer() _a : Tuple = self.get_image_processor() _a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Dict: _a : List[str] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Any: _a : Dict = self.get_image_processor() _a : str = 
self.get_tokenizer() _a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[str] = self.prepare_image_inputs() _a : List[Any] = image_processor(_a , return_tensors='''np''' ) _a : Dict = processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.get_image_processor() _a : Dict = self.get_tokenizer() _a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Tuple = '''lower newer''' _a : int = processor(text=_a ) _a : str = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ) -> List[Any]: _a : Any = self.get_image_processor() _a : str = self.get_tokenizer() _a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[Any] = '''lower newer''' _a : Union[str, Any] = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : int = processor.batch_decode(_a ) _a : int = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __lowercase ( self ) -> List[Any]: _a : Tuple = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Optional[int] = '''lower newer''' _a : Dict = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
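The behaviour these tests pin down, tokenizer output and image-processor output merged into one batch, looks like this in user code. A hedged sketch (assumes the `bert-base-uncased` vocabulary can be downloaded; the tests above build a tiny vocab locally instead):

import numpy as np
from PIL import Image
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=ViTImageProcessor())

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids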
15
0
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=DummyObject ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ["torch", "scipy"] def __init__( self , *_a , **_a ) -> Any: requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def __lowercase ( cls , *_a , **_a ) -> Tuple: requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def __lowercase ( cls , *_a , **_a ) -> Tuple: requires_backends(cls , ['''torch''', '''scipy'''] )
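These placeholders exist so that `from transformers import SomeModel` still succeeds when an optional backend is missing; the failure is deferred to first use, with an error that names the missing packages. A standalone sketch of the mechanism (simplified stand-ins for the real `DummyObject`/`requires_backends`; the model name is hypothetical):

def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires these backends: {', '.join(backends)}")

class DummyObject(type):
    # A metaclass, so classmethod-style access (e.g. from_pretrained) raises too.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class SomeTorchScipyModel(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

try:
    SomeTorchScipyModel.from_pretrained("any-checkpoint")
except ImportError as err:
    print(err)  # SomeTorchScipyModel requires these backends: torch, scipy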
371
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]: """simple docstring""" for attribute in key.split('''.''' ): _a : Optional[Any] = getattr(__a ,__a ) if weight_type is not None: _a : Dict = getattr(__a ,__a ).shape else: _a : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : List[Any] = value elif weight_type == "weight_g": _a : Any = value elif weight_type == "weight_v": _a : Union[str, Any] = value elif weight_type == "bias": _a : Optional[int] = value else: _a : List[Any] = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int: """simple docstring""" _a : Union[str, Any] = [] _a : Union[str, Any] = fairseq_model.state_dict() _a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _a : int = False if "conv_layers" in name: load_conv_layer( __a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,) _a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): _a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): _a : Any = True if "*" in mapped_key: _a : Optional[int] = name.split(__a )[0].split('''.''' )[-2] _a : Any = mapped_key.replace('''*''' ,__a ) if "weight_g" in name: _a : List[Any] = '''weight_g''' elif "weight_v" in name: _a : List[str] = '''weight_v''' elif "weight" in name: _a : Any = '''weight''' elif "bias" in name: _a : str = '''bias''' else: _a : Any = None set_recursively(__a ,__a ,__a ,__a ,__a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" _a : int = full_name.split('''conv_layers.''' )[-1] _a : Any = name.split('''.''' ) _a : List[Any] = int(items[0] ) _a : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _a : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _a : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _a : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _a : Any = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]: """simple docstring""" if config_path is not None: _a : Tuple = HubertConfig.from_pretrained(__a ) else: _a : Any = HubertConfig() if is_finetuned: if dict_path: _a : Tuple = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a : Any = target_dict.pad_index _a : Tuple = target_dict.bos_index _a : Optional[int] = target_dict.eos_index _a : Optional[Any] = len(target_dict.symbols ) _a : Tuple = os.path.join(__a ,'''vocab.json''' ) if not os.path.isdir(__a ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) ) return os.makedirs(__a ,exist_ok=__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices ,__a ) _a : Tuple = WavaVecaCTCTokenizer( __a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,) _a : Tuple = True if config.feat_extract_norm == '''layer''' else False _a : List[Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,) _a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a ) processor.save_pretrained(__a ) _a : Tuple = HubertForCTC(__a ) else: _a : Tuple = HubertModel(__a ) if is_finetuned: _a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _a : Any = model[0].eval() recursively_load_weights(__a ,__a ,__a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
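Wired together, the helpers convert a fairseq checkpoint from the command line using the flags registered at the bottom, e.g. `python convert_hubert_checkpoint.py --checkpoint_path ./hubert_base_ls960.pt --pytorch_dump_folder_path ./hubert-base-hf --not_finetuned` (the script filename and paths are placeholders; the flags are the ones the script itself defines).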
15
0
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=9_9 , _a=3_2 , _a=5 , _a=4 , _a=6_4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=2 , _a=2 , _a=2 , _a=2 , _a=4 , _a=1 , ) -> str: _a : Optional[Any] = parent _a : Tuple = batch_size _a : Any = seq_length _a : Optional[Any] = is_training _a : List[Any] = use_input_mask _a : str = use_token_type_ids _a : Dict = use_labels _a : Dict = vocab_size _a : Any = hidden_size _a : List[Any] = num_hidden_layers _a : Any = num_attention_heads _a : Tuple = intermediate_size _a : Dict = hidden_act _a : str = hidden_dropout_prob _a : str = attention_probs_dropout_prob _a : Optional[int] = max_position_embeddings _a : Any = type_vocab_size _a : Union[str, Any] = type_sequence_label_size _a : str = initializer_range _a : Dict = num_labels _a : Optional[Any] = num_choices _a : Optional[int] = scope _a : int = q_groups _a : List[str] = k_groups _a : str = v_groups _a : str = post_attention_groups _a : Dict = intermediate_groups _a : Tuple = output_groups def __lowercase ( self ) -> Union[str, Any]: _a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Optional[int] = None if self.use_input_mask: _a : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _a : Any = None _a : Optional[int] = None _a : Any = None if self.use_labels: _a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : str = ids_tensor([self.batch_size] , self.num_choices ) _a : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self ) -> str: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> str: _a : Tuple = SqueezeBertModel(config=_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model(_a , _a ) _a : Union[str, Any] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def 
__lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Any: _a : int = SqueezeBertForMaskedLM(config=_a ) model.to(_a ) model.eval() _a : List[Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Tuple: _a : List[str] = SqueezeBertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model( _a , attention_mask=_a , start_positions=_a , end_positions=_a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Dict = self.num_labels _a : str = SqueezeBertForSequenceClassification(_a ) model.to(_a ) model.eval() _a : Union[str, Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[Any]: _a : int = self.num_labels _a : Any = SqueezeBertForTokenClassification(config=_a ) model.to(_a ) model.eval() _a : Optional[int] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Dict = self.num_choices _a : List[Any] = SqueezeBertForMultipleChoice(config=_a ) model.to(_a ) model.eval() _a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Optional[Any] = model( _a , attention_mask=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowercase ( self ) -> str: _a : Optional[Any] = self.prepare_config_and_inputs() (_a) : Tuple = config_and_inputs _a : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Tuple = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase__ : Union[str, Any] = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Any = True UpperCAmelCase__ : Optional[Any] = False def __lowercase ( self ) -> Optional[Any]: _a : Union[str, Any] = SqueezeBertModelTester(self ) _a : List[Any] = ConfigTester(self , config_class=_a , dim=3_7 ) def __lowercase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def __lowercase ( self ) -> Tuple: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_a ) def __lowercase ( self ) -> Optional[Any]: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a ) def 
__lowercase ( self ) -> Union[str, Any]: _a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_a ) def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a ) def __lowercase ( self ) -> str: _a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_a ) def __lowercase ( self ) -> Optional[Any]: _a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a ) @slow def __lowercase ( self ) -> List[str]: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Optional[int] = SqueezeBertModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_sentencepiece @require_tokenizers @require_torch class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self ) -> List[str]: _a : int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) _a : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] ) _a : List[str] = model(_a )[0] _a : Any = torch.Size((1, 3) ) self.assertEqual(output.shape , _a ) _a : Optional[int] = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_a , _a , atol=1e-4 ) )
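The final integration test doubles as a recipe users can run directly; a hedged sketch that reproduces it outside the test harness (downloads the `squeezebert/squeezebert-mnli` checkpoint the test itself references):

import torch
from transformers import SqueezeBertForSequenceClassification

model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
with torch.no_grad():
    logits = model(input_ids)[0]
print(logits.shape)  # torch.Size([1, 3]): three MNLI classes
print(logits)        # the test expects approximately [[0.6401, -0.0349, -0.6041]]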
350
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"] UpperCAmelCase__ : str = "ViltImageProcessor" UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self , _a=None , _a=None , **_a ) -> Any: _a : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _a , ) _a : Dict = kwargs.pop('''feature_extractor''' ) _a : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_a , _a ) _a : int = self.image_processor def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding: _a : Tuple = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask _a : str = self.image_processor(_a , return_tensors=_a ) encoding.update(_a ) return encoding def __lowercase ( self , *_a , **_a ) -> Optional[Any]: return self.tokenizer.batch_decode(*_a , **_a ) def __lowercase ( self , *_a , **_a ) -> str: return self.tokenizer.decode(*_a , **_a ) @property def __lowercase ( self ) -> Optional[int]: _a : str = self.tokenizer.model_input_names _a : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __lowercase ( self ) -> Optional[Any]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , ) return self.image_processor_class @property def __lowercase ( self ) -> Any: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , ) return self.image_processor
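In user code the processor above is usually loaded from a checkpoint and handed an image plus a question in one call; a sketch (the checkpoint name is illustrative, any public ViLT checkpoint with a processor should behave the same):

import numpy as np
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.fromarray(np.random.randint(0, 255, (384, 384, 3), dtype=np.uint8))
inputs = processor(image, "How many cats are there?", return_tensors="pt")
# Tokenizer output plus pixel_values/pixel_mask from the image processor:
print(sorted(inputs.keys()))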
15
0
from __future__ import annotations from math import pi def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> dict[str, float]: """simple docstring""" if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
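Each branch solves X_L = 2·π·f·L for whichever quantity was passed as 0. For example, with L = 35 mH and f = 50 Hz:

from math import pi

print(2 * pi * 50 * 35e-3)                 # reactance X_L = 2*pi*f*L, about 10.9956 ohm
print(10.995574287564276 / (2 * pi * 50))  # inverting recovers L, about 0.035 H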
351
from math import ceil def __UpperCAmelCase ( __a : int = 1_001 ) -> int: """simple docstring""" _a : Dict = 1 for i in range(1 ,int(ceil(n / 2.0 ) ) ): _a : int = 2 * i + 1 _a : str = 2 * i _a : Any = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: a__ = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
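This is Project Euler 28: the corners of the spiral ring with odd side length s = 2i + 1 are s², s² − (s−1), s² − 2(s−1) and s² − 3(s−1), so each ring adds 4s² − 6(s−1) = 4(2i+1)² − 6(2i), and the centre cell contributes the initial 1. A cleaned-up equivalent with readable names (mine, not the row's) that reproduces the known answers:

def spiral_diagonal_sum(n: int) -> int:
    total = 1
    for i in range(1, (n + 1) // 2):
        total += 4 * (2 * i + 1) ** 2 - 6 * (2 * i)
    return total

print(spiral_diagonal_sum(5))      # 101, the worked 5x5 example from the problem
print(spiral_diagonal_sum(1_001))  # 669171001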
15
0
"""simple docstring""" from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __UpperCAmelCase ( __a : List[str] ) -> str: """simple docstring""" return getitem, k def __UpperCAmelCase ( __a : int ,__a : List[Any] ) -> Optional[int]: """simple docstring""" return setitem, k, v def __UpperCAmelCase ( __a : Any ) -> Optional[Any]: """simple docstring""" return delitem, k def __UpperCAmelCase ( __a : Any ,__a : List[str] ,*__a : Any ) -> Dict: """simple docstring""" try: return fun(__a ,*__a ), None except Exception as e: return None, e a__ = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) a__ = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] a__ = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] a__ = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] a__ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a__ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( '''operations''' ,( pytest.param(_add_items ,id='''add items''' ), pytest.param(_overwrite_items ,id='''overwrite items''' ), pytest.param(_delete_items ,id='''delete items''' ), pytest.param(_access_absent_items ,id='''access absent items''' ), pytest.param(_add_with_resize_up ,id='''add with resize up''' ), pytest.param(_add_with_resize_down ,id='''add with resize down''' ), ) ,) def __UpperCAmelCase ( __a : int ) -> int: """simple docstring""" _a : str = HashMap(initial_block_size=4 ) _a : Union[str, Any] = {} for _, (fun, *args) in enumerate(__a ): _a : List[Any] = _run_operation(__a ,__a ,*__a ) _a : List[Any] = _run_operation(__a ,__a ,*__a ) assert my_res == py_res assert str(__a ) == str(__a ) assert set(__a ) == set(__a ) assert len(__a ) == len(__a ) assert set(my.items() ) == set(py.items() ) def __UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" def is_public(__a : str ) -> bool: return not name.startswith('''_''' ) _a : List[Any] = {name for name in dir({} ) if is_public(__a )} _a : Tuple = {name for name in dir(HashMap() ) if is_public(__a )} assert dict_public_names > hash_public_names
352
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]: """simple docstring""" _a : str = to_pil_image(__a ) _a , _a : Optional[Any] = pil_image.size _a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a ) _a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()] _a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] _a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a : int = [] for x, y, w, h in zip(__a ,__a ,__a ,__a ): _a : List[str] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes _a : Dict = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a ,__a ,__a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None: super().__init__(**_a ) _a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Union[str, Any] = get_size_dict(_a ) _a : int = do_resize _a : Optional[int] = size _a : str = resample _a : str = do_rescale _a : Any = rescale_value _a : Optional[Any] = do_normalize _a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD _a : List[Any] = apply_ocr _a : Optional[int] = ocr_lang _a : Tuple = tesseract_config def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray: _a : Any = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[int] = (size['''height'''], size['''width''']) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : Any = get_size_dict(_a ) _a : List[str] = resample if resample is not None else self.resample _a : int = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : int = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr _a : int = ocr_lang if ocr_lang is not None else self.ocr_lang _a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. _a : Any = [to_numpy_array(_a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a : str = [] _a : str = [] for image in images: _a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a ) words_batch.append(_a ) boxes_batch.append(_a ) if do_resize: _a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a ) if apply_ocr: _a : Optional[int] = words_batch _a : List[Any] = boxes_batch return data
15
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a__ = logging.get_logger(__name__) a__ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "sew-d" def __init__( self , _a=3_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a=2 , _a=5_1_2 , _a=2_5_6 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=1_2_8 , _a=1_6 , _a=True , _a=0.05 , _a=1_0 , _a=2 , _a=0.0 , _a=1_0 , _a=0 , _a="mean" , _a=False , _a=False , _a=2_5_6 , _a=0 , _a=1 , _a=2 , **_a , ) -> int: super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) _a : Optional[Any] = hidden_size _a : List[str] = feat_extract_norm _a : List[str] = feat_extract_activation _a : Tuple = list(_a ) _a : List[Any] = list(_a ) _a : List[str] = list(_a ) _a : Tuple = conv_bias _a : str = num_conv_pos_embeddings _a : int = num_conv_pos_embedding_groups _a : List[Any] = len(self.conv_dim ) _a : Optional[int] = num_hidden_layers _a : Optional[int] = intermediate_size _a : int = squeeze_factor _a : Optional[Any] = max_position_embeddings _a : Any = position_buckets _a : List[str] = share_att_key _a : Optional[int] = relative_attention _a : str = norm_rel_ebd _a : List[Any] = list(_a ) _a : Optional[int] = hidden_act _a : int = num_attention_heads _a : str = hidden_dropout _a : List[Any] = attention_dropout _a : int = activation_dropout _a : int = feat_proj_dropout _a : Any = final_dropout _a : Any = layer_norm_eps _a : int = feature_layer_norm_eps _a : Optional[int] = initializer_range _a : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a : Optional[Any] = apply_spec_augment _a : Optional[int] = mask_time_prob _a : Tuple = mask_time_length _a : str = mask_time_min_masks _a : Union[str, Any] = mask_feature_prob _a : Any = mask_feature_length _a : Optional[int] = mask_feature_min_masks # ctc loss _a : Tuple = ctc_loss_reduction _a : Optional[int] = ctc_zero_infinity # sequence classification _a : Optional[Any] = use_weighted_layer_sum _a : Any = classifier_proj_size @property def __lowercase ( self ) -> List[str]: return functools.reduce(operator.mul , self.conv_stride , 1 )
353
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" _a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a ) _a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=__a ) env_command_parser(subparsers=__a ) launch_command_parser(subparsers=__a ) tpu_command_parser(subparsers=__a ) test_command_parser(subparsers=__a ) # Let's go _a : Dict = parser.parse_args() if not hasattr(__a ,'''func''' ): parser.print_help() exit(1 ) # Run args.func(__a ) if __name__ == "__main__": main()
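This dispatcher is the single entry point behind the `accelerate` executable: `accelerate config`, `accelerate env`, `accelerate launch train.py` and `accelerate test` all parse into `args.func` here and route to the handler that each subparser registered.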
15
0
"""simple docstring""" import argparse import os import re import packaging.version a__ = '''examples/''' a__ = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } a__ = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } a__ = '''README.md''' def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int: """simple docstring""" with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Tuple = f.read() _a : str = REPLACE_PATTERNS[pattern] _a : List[str] = replace.replace('''VERSION''' ,__a ) _a : List[Any] = re_pattern.sub(__a ,__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.write(__a ) def __UpperCAmelCase ( __a : Any ) -> List[Any]: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' ) def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a ,__a ,__a ) if not patch: update_version_in_examples(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" _a : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' _a : str = '''1. Want to contribute a new model?''' with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Optional[int] = f.readlines() # Find the start of the list. _a : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _a : List[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): _a : Tuple = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,) index += 1 with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] ,'''r''' ) as f: _a : Optional[Any] = f.read() _a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def __UpperCAmelCase ( __a : Dict=False ) -> str: """simple docstring""" _a : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: _a : List[Any] = default_version.base_version elif patch: _a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _a : Dict = input(F"""Which version are you releasing? 
[{default_version}]""" ) if len(__a ) == 0: _a : int = default_version print(F"""Updating version to {version}.""" ) global_version_update(__a ,patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" _a : str = get_version() _a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _a : List[Any] = current_version.base_version # Check with the user we got that right. _a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(__a ) == 0: _a : List[str] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') a__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
354
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a__ = random.Random() def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any: """simple docstring""" if rng is None: _a : Dict = global_rng _a : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]: _a : Optional[Any] = parent _a : str = batch_size _a : List[str] = min_seq_length _a : str = max_seq_length _a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _a : List[Any] = spectrogram_length _a : List[str] = feature_size _a : List[Any] = num_audio_channels _a : Tuple = hop_length _a : Optional[int] = chunk_length _a : int = sampling_rate def __lowercase ( self ) -> Union[str, Any]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowercase ( self , _a=False , _a=False ) -> List[Any]: def _flatten(_a ): return list(itertools.chain(*_a ) ) if equal_length: _a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _a : List[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _a : str = [np.asarray(_a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = TvltFeatureExtractor def __lowercase ( self ) -> Dict: _a : List[str] = TvltFeatureExtractionTester(self ) def __lowercase ( self ) -> Any: _a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_a , '''spectrogram_length''' ) ) self.assertTrue(hasattr(_a , '''feature_size''' ) ) self.assertTrue(hasattr(_a , '''num_audio_channels''' ) ) self.assertTrue(hasattr(_a , '''hop_length''' ) ) self.assertTrue(hasattr(_a , '''chunk_length''' ) ) self.assertTrue(hasattr(_a , '''sampling_rate''' ) ) def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : int = feat_extract_first.save_pretrained(_a )[0] check_json_file_has_correct_format(_a ) _a : Dict = self.feature_extraction_class.from_pretrained(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Union[str, Any] = feat_extract_second.to_dict() _a : Any = dict_first.pop('''mel_filters''' ) _a : int = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) 
-> Optional[int]: _a : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Optional[int] = os.path.join(_a , '''feat_extract.json''' ) feat_extract_first.to_json_file(_a ) _a : List[str] = self.feature_extraction_class.from_json_file(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Dict = feat_extract_second.to_dict() _a : str = dict_first.pop('''mel_filters''' ) _a : str = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) -> Union[str, Any]: # Initialize feature_extractor _a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] _a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs] # Test not batched input _a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _a : Union[str, Any] = feature_extractor( _a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. _a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] _a : int = np.asarray(_a ) _a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowercase ( self , _a ) -> Optional[Any]: _a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __lowercase ( self ) -> int: _a : Union[str, Any] = self._load_datasamples(1 ) _a : int = TvltFeatureExtractor() _a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) _a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
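# A minimal usage sketch for the feature extractor exercised in the tests above,
# assuming a transformers version that ships TVLT. One second of synthetic audio
# stands in for real data; audio_values comes back with shape
# (batch, num_audio_channels, time, feature_size), matching the assertions above.
import numpy as np
from transformers import TvltFeatureExtractor

waveform = np.random.randn(44_100).astype(np.float32)
extractor = TvltFeatureExtractor()
features = extractor(waveform, return_tensors="np", sampling_rate=44_100)
print(features.audio_values.shape)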
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
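# Quick smoke test for numpy_to_pil above: a random float batch in [0, 1] becomes
# a list of PIL images.
import numpy as np

batch = np.random.rand(2, 64, 64, 3).astype(np.float32)
pil_images = numpy_to_pil(batch)
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)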
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a , _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this 
array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
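# A hedged usage sketch for the fill-mask pipeline implemented above. The checkpoint
# name is only an example; `top_k` and `targets` map to the postprocess parameters
# and target-id lookup defined at the end of the class.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")

# Unrestricted predictions for the single mask token.
print(unmasker("The capital of France is [MASK].", top_k=3))

# Restricting scoring to explicit targets exercises the target-id path above.
print(unmasker("The capital of France is [MASK].", targets=["paris", "london"]))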
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text so that each sentence sits on its own line, as ROUGE-Lsum expects."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
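# Example use of the splitter above (requires nltk plus the punkt model downloaded
# at import time); each sentence ends up on its own line.
text = "ROUGE-Lsum is computed per line. Each sentence therefore needs its own line."
print(add_newline_to_end_of_each_sentence(text))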
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow a__ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) a__ = logging.getLogger() def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _a : Any = argparse.ArgumentParser() parser.add_argument('''-f''' ) _a : Dict = parser.parse_args() return args.f def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any: """simple docstring""" _a : Any = os.path.join(__a ,F"""{split}_results.json""" ) if os.path.exists(__a ): with open(__a ,'''r''' ) as f: return json.load(__a ) raise ValueError(F"""can't find {path}""" ) a__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> str: _a : Any = self.get_auto_remove_tmp_dir() _a : Optional[Any] = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_glue.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def __lowercase ( self ) -> Dict: _a : Tuple = self.get_auto_remove_tmp_dir() _a : Tuple = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_clm_flax.main() _a : List[str] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 1_0_0 ) @slow def __lowercase ( self ) -> Optional[int]: _a : str = self.get_auto_remove_tmp_dir() _a : Optional[int] = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(_a , '''argv''' , _a ): run_summarization_flax.main() _a : Optional[int] = get_results(_a , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def __lowercase ( self ) -> Tuple: _a : List[str] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_mlm.py --model_name_or_path 
distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(_a , '''argv''' , _a ): run_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 4_2 ) @slow def __lowercase ( self ) -> Dict: _a : Optional[Any] = self.get_auto_remove_tmp_dir() _a : int = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_ta_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def __lowercase ( self ) -> Optional[Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _a : Any = 7 if get_gpu_count() > 1 else 2 _a : List[Any] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_ner.main() _a : Dict = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def __lowercase ( self ) -> Any: _a : Optional[int] = self.get_auto_remove_tmp_dir() _a : Union[str, Any] = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(_a , '''argv''' , _a ): run_qa.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_f1'''] , 3_0 ) self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
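# Every test above follows the same pattern: build an argv list, then patch sys.argv
# so an argparse-based main() parses it. A self-contained sketch of that pattern:
import sys
from unittest.mock import patch


def tiny_main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    print(parser.parse_args().output_dir)


with patch.object(sys, "argv", ["prog.py", "--output_dir", "/tmp/run"]):
    tiny_main()  # prints /tmp/run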
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, 
is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, torch_only_method, ) a__ = '''pytorch_model.bin''' a__ = '''pytorch_model.bin.index.json''' a__ = '''adapter_config.json''' a__ = '''adapter_model.bin''' a__ = '''adapter_model.safetensors''' a__ = '''tf_model.h5''' a__ = '''tf_model.h5.index.json''' a__ = '''model.ckpt''' a__ = '''flax_model.msgpack''' a__ = '''flax_model.msgpack.index.json''' a__ = '''model.safetensors''' a__ = '''model.safetensors.index.json''' a__ = '''config.json''' a__ = '''preprocessor_config.json''' a__ = FEATURE_EXTRACTOR_NAME a__ = '''generation_config.json''' a__ = '''modelcard.json''' a__ = '''▁''' a__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility a__ = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. a__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] a__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def __UpperCAmelCase ( __a : int ) -> Tuple: """simple docstring""" if version.parse(__a ) < version.parse(__a ): if "dev" in min_version: _a : Tuple = ( '''This example requires a source install from HuggingFace Transformers (see ''' '''`https://huggingface.co/docs/transformers/installation#install-from-source`),''' ) else: _a : List[str] = F"""This example requires a minimum version of {min_version},""" error_message += F""" but the version found is {__version__}.\n""" raise ImportError( error_message + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other ''' '''versions of HuggingFace Transformers.''' )
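# The version guard at the end of the module boils down to a packaging comparison;
# the version strings below are placeholders for illustration.
from packaging import version

installed, required = "4.30.0", "4.31.0.dev0"
if version.parse(installed) < version.parse(required):
    print(f"This example requires a minimum version of {required}, but the version found is {installed}.")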
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with stable-docs links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
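# The VERSION-placeholder substitution used by update_version_in_file, shown in
# isolation on an in-memory string instead of a real file.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
source = '__version__ = "4.30.0.dev0"\n'
print(pattern.sub('__version__ = "4.30.0"', source))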
import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py a__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. a__ = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. a__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') a__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. a__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) a__ = [ ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''), ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''), ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''), ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''), ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''), ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''), ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''), ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''), ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''), ( '''zero-shot-object-detection''', '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForZeroShotObjectDetection''', ), ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''), ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''), ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''), ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''), ( '''table-question-answering''', '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForTableQuestionAnswering''', ), ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''), ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''), ( '''next-sentence-prediction''', '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''', '''AutoModelForNextSentencePrediction''', ), ( '''audio-frame-classification''', '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioFrameClassification''', ), ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''), ( '''document-question-answering''', '''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForDocumentQuestionAnswering''', ), ( '''visual-question-answering''', '''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForVisualQuestionAnswering''', ), 
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''), ( '''zero-shot-image-classification''', '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForZeroShotImageClassification''', ), ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''), ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''), ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''), ] def __UpperCAmelCase ( __a : Optional[Any] ) -> Dict: """simple docstring""" _a : List[Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' ,__a ) return [m.group(0 ) for m in matches] def __UpperCAmelCase ( ) -> Dict: """simple docstring""" _a : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _a : Any = { config.replace('''Config''' ,'''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. _a : List[Any] = collections.defaultdict(__a ) _a : int = collections.defaultdict(__a ) _a : Union[str, Any] = collections.defaultdict(__a ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(__a ): _a : Union[str, Any] = None if _re_tf_models.match(__a ) is not None: _a : Any = tf_models _a : str = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: _a : List[str] = flax_models _a : str = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: _a : List[str] = pt_models _a : Dict = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_prefix_to_model_type: _a : Union[str, Any] = True break # Try again after removing the last word in the name _a : List[str] = ''''''.join(camel_case_split(__a )[:-1] ) _a : int = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) _a : Optional[int] = list(__a ) all_models.sort() _a : Any = {'''model_type''': all_models} _a : Optional[Any] = [pt_models[t] for t in all_models] _a : List[str] = [tf_models[t] for t in all_models] _a : Optional[Any] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure _a : Dict = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: _a : List[str] = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: _a : Optional[Any] = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: _a : List[str] = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
_a : str = '''AutoTokenizer''' _a : Union[str, Any] = [processors[t] for t in all_models] return pd.DataFrame(__a ) def __UpperCAmelCase ( __a : str ) -> str: """simple docstring""" _a : Any = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: _a : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""] _a : int = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(__a ,__a ,__a ): # The type of pipeline may not exist in this framework if not hasattr(__a ,__a ): continue # First extract all model_names _a : Union[str, Any] = [] for name in getattr(__a ,__a ).values(): if isinstance(__a ,__a ): model_names.append(__a ) else: model_names.extend(list(__a ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[Any]: """simple docstring""" _a : int = get_frameworks_table() _a : Optional[int] = Dataset.from_pandas(__a ) _a : str = hf_hub_download( '''huggingface/transformers-metadata''' ,'''pipeline_tags.json''' ,repo_type='''dataset''' ,token=__a ) _a : Tuple = Dataset.from_json(__a ) _a : Optional[int] = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(__a ) ) } _a : Union[str, Any] = update_pipeline_and_auto_class_table(__a ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. _a : Dict = sorted(table.keys() ) _a : str = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) _a : Any = Dataset.from_pandas(__a ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(__a ,'''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(__a ,'''pipeline_tags.json''' ) ) if commit_sha is not None: _a : Dict = ( F"""Update with commit {commit_sha}\n\nSee: """ F"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: _a : int = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' ,folder_path=__a ,repo_type='''dataset''' ,token=__a ,commit_message=__a ,) def __UpperCAmelCase ( ) -> int: """simple docstring""" _a : Any = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} _a : Union[str, Any] = transformers_module.pipelines.SUPPORTED_TASKS _a : Optional[Any] = [] for key in pipeline_tasks: if key not in in_table: _a : Optional[Any] = pipeline_tasks[key]['''pt'''] if isinstance(__a ,(list, tuple) ): _a : Union[str, Any] = model[0] _a : Any = model.__name__ if model not in in_table.values(): missing.append(__a ) if len(__a ) > 0: _a : Optional[int] = ''', '''.join(__a ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' F"""`utils/update_metadata.py`: {msg}. 
Please add them!""" ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''') parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''') parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''') a__ = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
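# The look-around regex the metadata script uses to split CamelCase model names,
# demonstrated standalone:
import re


def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


print(camel_case_split("TFBertForSequenceClassification"))
# ['TF', 'Bert', 'For', 'Sequence', 'Classification']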
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(2) == 1 and fibonacci(10) == 55."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
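# Sanity checks for the helpers above: fibonacci is 0-indexed (F(10) == 55), and the
# first Fibonacci number with three digits is F(12) == 144. With the default n=1000
# this reproduces Project Euler problem 25 (solution() == 4782).
assert fibonacci(10) == 55
assert fibonacci_digits_index(3) == 12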
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def __UpperCAmelCase ( __a : Tuple ) -> str: """simple docstring""" return EnvironmentCommand() def __UpperCAmelCase ( __a : Union[str, Any] ) -> str: """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" @staticmethod def __lowercase ( _a ) -> int: _a : str = parser.add_parser('''env''' ) download_parser.set_defaults(func=_a ) download_parser.add_argument( '''--accelerate-config_file''' , default=_a , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_a ) def __init__( self , _a , *_a ) -> None: _a : Union[str, Any] = accelerate_config_file def __lowercase ( self ) -> Optional[int]: _a : Tuple = '''not installed''' if is_safetensors_available(): import safetensors _a : Dict = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors _a : str = F"""{safetensors.__version__} but is ignored because of PyTorch version too old.""" _a : Optional[Any] = '''not installed''' _a : List[Any] = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _a : Optional[int] = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_a ): _a : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict() _a : Optional[int] = ( '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(_a , _a ) else F"""\t{accelerate_config}""" ) _a : Tuple = '''not installed''' _a : Tuple = '''NA''' if is_torch_available(): import torch _a : str = torch.__version__ _a : int = torch.cuda.is_available() _a : List[Any] = '''not installed''' _a : Optional[Any] = '''NA''' if is_tf_available(): import tensorflow as tf _a : int = tf.__version__ try: # deprecated in v2.1 _a : Dict = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _a : List[str] = bool(tf.config.list_physical_devices('''GPU''' ) ) _a : Dict = '''not installed''' _a : Optional[Any] = '''not installed''' _a : Optional[Any] = '''not installed''' _a : List[Any] = '''NA''' if is_flax_available(): import flax import jax import jaxlib _a : Optional[Any] = flax.__version__ _a : Union[str, Any] = jax.__version__ _a : Union[str, Any] = jaxlib.__version__ _a : Tuple = jax.lib.xla_bridge.get_backend().platform _a : Tuple = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F"""{safetensors_version}""", '''Accelerate version''': F"""{accelerate_version}""", '''Accelerate config''': F"""{accelerate_config_str}""", '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""", '''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""", '''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""", '''Jax version''': F"""{jax_version}""", '''JaxLib version''': F"""{jaxlib_version}""", '''Using GPU in 
script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_a ) ) return info @staticmethod def __lowercase ( _a ) -> Optional[int]: return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record a__ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' a__ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' a__ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', 
\'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]: """simple docstring""" return float((preds == labels).mean() ) def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]: """simple docstring""" _a : List[str] = simple_accuracy(__a ,__a ) _a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) ) return { "accuracy": acc, "f1": fa, } def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]: """simple docstring""" _a : Union[str, Any] = {} for id_pred, label in zip(__a ,__a ): _a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" _a : Optional[Any] = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _a : str = [(pred, label)] _a , _a : Any = [], [] for question, preds_labels in question_map.items(): _a , _a : Any = zip(*__a ) _a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' ) fas.append(__a ) _a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) ) ems.append(__a ) _a : List[str] = float(sum(__a ) / len(__a ) ) _a : str = sum(__a ) / len(__a ) _a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __lowercase ( self ) -> Any: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __lowercase ( self , _a , _a ) -> Optional[Any]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_a , _a )} elif self.config_name == "cb": return acc_and_fa(_a , _a , fa_avg='''macro''' ) elif self.config_name == "record": _a : Any = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for 
ans in ref['''answers''']]} for ref in references ] } ] _a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(_a , _a )[0] elif self.config_name == "multirc": return evaluate_multirc(_a , _a ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_a , _a )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __UpperCAmelCase ( __a : int ) -> Union[str, Any]: """simple docstring""" _a : Any = SwinConfig(image_size=192 ) if "base" in model_name: _a : Optional[int] = 6 _a : int = 128 _a : str = (2, 2, 18, 2) _a : Tuple = (4, 8, 16, 32) elif "large" in model_name: _a : Union[str, Any] = 12 _a : List[Any] = 192 _a : str = (2, 2, 18, 2) _a : Optional[Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) _a : List[str] = window_size _a : str = embed_dim _a : Union[str, Any] = depths _a : Optional[Any] = num_heads return config def __UpperCAmelCase ( __a : Tuple ) -> Dict: """simple docstring""" if "encoder.mask_token" in name: _a : int = name.replace('''encoder.mask_token''' ,'''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: _a : List[str] = name.replace('''encoder.patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: _a : Tuple = name.replace('''encoder.patch_embed.norm''' ,'''embeddings.norm''' ) if "attn.proj" in name: _a : List[str] = name.replace('''attn.proj''' ,'''attention.output.dense''' ) if "attn" in name: _a : int = name.replace('''attn''' ,'''attention.self''' ) if "norm1" in name: _a : List[str] = name.replace('''norm1''' ,'''layernorm_before''' ) if "norm2" in name: _a : int = name.replace('''norm2''' ,'''layernorm_after''' ) if "mlp.fc1" in name: _a : Optional[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' ) if "mlp.fc2" in name: _a : int = name.replace('''mlp.fc2''' ,'''output.dense''' ) if name == "encoder.norm.weight": _a : Dict = '''layernorm.weight''' if name == "encoder.norm.bias": _a : Optional[Any] = '''layernorm.bias''' if "decoder" in name: pass else: _a : List[Any] = '''swin.''' + name return name def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> List[str]: """simple docstring""" for key in orig_state_dict.copy().keys(): _a : Tuple = orig_state_dict.pop(__a ) if "attn_mask" in key: pass elif "qkv" in key: _a : List[str] = key.split('''.''' ) _a : Dict = int(key_split[2] ) _a : Any = int(key_split[4] ) _a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _a : str = val[:dim, :] _a : List[Any] = val[ dim : dim * 2, : ] _a : Tuple = val[-dim:, :] else: _a : Optional[Any] = val[ :dim ] _a : List[Any] = val[ dim : dim * 2 ] _a : str = val[ -dim: ] else: _a : Dict = val return orig_state_dict def __UpperCAmelCase ( __a : Any ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : Any ) -> List[Any]: """simple docstring""" _a : Any = torch.load(__a ,map_location='''cpu''' )['''model'''] _a : Optional[Any] = get_swin_config(__a ) _a : int = SwinForMaskedImageModeling(__a ) model.eval() _a : List[str] = convert_state_dict(__a ,__a ) model.load_state_dict(__a ) _a : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _a : Tuple = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) _a : Any = Image.open(requests.get(__a ,stream=__a ).raw ) _a : str = image_processor(images=__a ,return_tensors='''pt''' ) with torch.no_grad(): _a : List[Any] = model(**__a ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving image processor to 
{pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) a__ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
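# Loading a converted checkpoint back, mirroring the save/push step above. The hub
# id follows the script's push_to_hub naming and is only an example.
from transformers import SwinForMaskedImageModeling

model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")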
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue and its eigenvector by repeated multiplication."""
    # Ensure the matrix is square and matches the vector's dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to the next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
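# Dominant eigenpair of a small symmetric matrix, using power_iteration from above.
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])
eigen_value, eigen_vector = power_iteration(a, np.array([1.0, 0.0]))
print(round(float(eigen_value), 6))  # ~3.0, the largest eigenvalue of [[2, 1], [1, 2]]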
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
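# Minimal usage sketch for heaps() above: Heap's algorithm yields all 3! = 6
# permutations of a 3-element list (in algorithm order, not lexicographic order).
perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}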
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
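# Usage sketch: 5**2 = 25 ends in 5 and 76**2 = 5776 ends in 76, so both are
# automorphic; 7**2 = 49 does not end in 7.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert not is_automorphic_number(7)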
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by binary exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last ``digits`` digits of the power tower base ** base ** ... of the given height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
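# Usage sketch for _modexpt(): 3**5 mod 7 = 243 mod 7 = 5. solution() then applies
# it repeatedly to get the last `digits` digits of the power tower
# 1777 ** 1777 ** ... ** 1777 of height 1855 (Project Euler 188).
assert _modexpt(3, 5, 7) == 5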
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : nn.ModuleList ,__a : nn.ModuleList ,__a : List[int] ) -> None: """simple docstring""" _a : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__a ) == len(__a ), F"""{len(__a )} != {len(__a )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) a__ = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } a__ = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> int: """simple docstring""" try: _a : int = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" F""" {n_student}""" ) return list(range(__a ) ) def __UpperCAmelCase ( __a : Dict ,__a : List[str] ) -> List[int]: """simple docstring""" if n_student > n_teacher: raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(__a ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __UpperCAmelCase ( __a : Union[str, PreTrainedModel] ,__a : Union[str, Path] = "student" ,__a : Union[int, None] = None ,__a : Union[int, None] = None ,__a : Any=False ,__a : Any=None ,__a : int=None ,**__a : Optional[int] ,) -> Tuple[PreTrainedModel, List[int], List[int]]: """simple docstring""" _a : Dict = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(__a ,__a ): AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience _a : List[str] = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval() else: assert isinstance(__a ,__a ), F"""teacher must be a model or string got type {type(__a )}""" _a : Dict = teacher.config.to_diff_dict() try: _a : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: _a : Union[str, Any] = teacher_e if d is None: _a : List[Any] = teacher_d init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config ,'''num_encoder_layers''' ): _a : List[str] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: _a : Optional[Any] = 
teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: _a : Optional[int] = teacher_e if d is None: _a : Optional[Any] = teacher_d if hasattr(teacher.config ,'''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__a ) # Copy weights _a : List[str] = teacher.config_class(**__a ) _a : Dict = AutoModelForSeqaSeqLM.from_config(__a ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. _a : str = student.load_state_dict(teacher.state_dict() ,strict=__a ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save _a : List[str] = list(range(__a ) ), list(range(__a ) ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" F""" {save_path}""" ) student.save_pretrained(__a ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: _a : List[int] = pick_layers_to_copy(__a ,__a ) if d_layers_to_copy is None: _a : List[int] = pick_layers_to_copy(__a ,__a ) try: if hasattr( __a ,'''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers ,student.prophetnet.encoder.layers ,__a ) copy_layers(teacher.prophetnet.decoder.layers ,student.prophetnet.decoder.layers ,__a ) else: copy_layers(teacher.model.encoder.layers ,student.model.encoder.layers ,__a ) copy_layers(teacher.model.decoder.layers ,student.model.decoder.layers ,__a ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block ,student.encoder.block ,__a ) copy_layers(teacher.decoder.block ,student.decoder.block ,__a ) logger.info( F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" ) _a : List[Any] = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(__a ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
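# Usage sketch for the entry point exposed to fire.Fire above (assumes this module
# is importable; the checkpoint name and save path are illustrative): build a student
# with all 12 encoder layers but only 3 decoder layers copied from a BART teacher.
student, e_copied, d_copied = create_student_by_copying_alternating_layers(
    "facebook/bart-large-cnn", save_path="student_12-3", e=12, d=3
)
print(e_copied, d_copied)  # which teacher layers were copied into the student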
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging a__ = '''\ ''' a__ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' a__ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _a : List[str] = '''cuda''' else: _a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' _a : Dict = AutoModelForCausalLM.from_pretrained(_a ) _a : List[Any] = model.to(_a ) _a : List[str] = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _a : str = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _a : List[Any] = model.config.max_length - 1 else: _a : List[str] = model.config.max_length _a : Union[str, Any] = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a ) _a : List[Any] = encodings['''input_ids'''] _a : int = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _a : Optional[int] = [] _a : Dict = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): _a : Dict = min(start_index + batch_size , len(_a ) ) _a : Union[str, Any] = encoded_texts[start_index:end_index] _a : int = attn_masks[start_index:end_index] if add_start_token: _a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) _a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _a : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) _a : Dict = encoded_batch with torch.no_grad(): _a : Any = model(_a , attention_mask=_a ).logits _a : List[str] = out_logits[..., :-1, :].contiguous() _a : Union[str, Any] = labels[..., 1:].contiguous() _a : Optional[int] = attn_mask[..., 1:].contiguous() _a : Union[str, Any] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to 
modify this array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
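# Usage sketch for the pipeline class above via the high-level factory. The
# checkpoint is illustrative; any masked-LM checkpoint works.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
for pred in fill_mask("Paris is the [MASK] of France.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))

# `targets` restricts scoring to the given tokens, per the target-id logic above.
print(fill_mask("Paris is the [MASK] of France.", targets=["capital"]))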
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by DFS finish time (ascending)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
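# Usage sketch on the sample graphs above. test_graph_1 has the 3-cycle {0, 1, 2}
# plus two singleton components; test_graph_2 splits into two 3-vertex cycles.
# (Vertex order within each component follows the DFS in find_components.)
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]
assert strongly_connected_components(test_graph_2) == [[0, 2, 1], [3, 5, 4]]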
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
def catalan(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed), via C(i) = C(i-1) * (4i - 2) / (i + 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
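# Usage sketch: with this 1-indexed convention the sequence runs 1, 1, 2, 5, 14, ...
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]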
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
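# Usage sketch: (3 + 6 + 9 + 12 + 15 + 18 + 21) / 7 = 84 / 7 = 12.0.
assert mean([3, 6, 9, 12, 15, 18, 21]) == 12.0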
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
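# A minimal sketch of the scheduler call the test above hinges on: add_noise builds
# x_t from x_0, noise and timesteps with the same closed form for DDPM and DDIM
# configs that share num_train_timesteps, which is why the two training runs match.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
clean = torch.randn(2, 3, 32, 32)
noise = torch.randn_like(clean)
timesteps = torch.randint(0, 1000, (2,))
noisy = scheduler.add_noise(clean, noise, timesteps)
print(noisy.shape)  # torch.Size([2, 3, 32, 32])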
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Rename the LM-head weight key so the checkpoint loads into transformers."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
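# The converted weights pair with the GPT-2 architecture; the published results of
# this conversion live on the Hub, e.g.:
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
# model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")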
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : int = "char" UpperCAmelCase__ : Optional[int] = "bpe" UpperCAmelCase__ : List[str] = "wp" a__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[Any] = ["image_processor", "char_tokenizer"] UpperCAmelCase__ : List[Any] = "ViTImageProcessor" UpperCAmelCase__ : List[Any] = "MgpstrTokenizer" def __init__( self , _a=None , _a=None , **_a ) -> Any: _a : int = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _a , ) _a : List[Any] = kwargs.pop('''feature_extractor''' ) _a : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) _a : List[str] = tokenizer _a : str = AutoTokenizer.from_pretrained('''gpt2''' ) _a : Dict = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , **_a ) -> Any: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: _a : str = self.image_processor(_a , return_tensors=_a , **_a ) if text is not None: _a : Union[str, Any] = self.char_tokenizer(_a , return_tensors=_a , **_a ) if text is None: return inputs elif images is None: return encodings else: _a : Optional[int] = encodings['''input_ids'''] return inputs def __lowercase ( self , _a ) -> List[str]: _a : str = sequences _a : Optional[int] = char_preds.size(0 ) _a : List[Any] = self._decode_helper(_a , '''char''' ) _a : Optional[Any] = self._decode_helper(_a , '''bpe''' ) _a : int = self._decode_helper(_a , '''wp''' ) _a : Tuple = [] _a : Union[str, Any] = [] for i in range(_a ): _a : Dict = [char_scores[i], bpe_scores[i], wp_scores[i]] _a : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] _a : Optional[int] = scores.index(max(_a ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _a : Optional[int] = {} _a : int = final_strs _a : List[str] = final_scores _a : Union[str, Any] = char_strs _a : Optional[int] = bpe_strs _a : List[str] = wp_strs return out def __lowercase ( self , _a , _a ) -> Tuple: if format == DecodeType.CHARACTER: _a : Tuple = self.char_decode _a : List[Any] = 1 _a : str = '''[s]''' elif format == DecodeType.BPE: _a : Tuple = self.bpe_decode _a : Union[str, Any] = 2 _a : List[str] = '''#''' elif format == DecodeType.WORDPIECE: _a : Optional[Any] = self.wp_decode _a : Optional[int] = 1_0_2 _a : Optional[Any] = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) _a : str = [], [] _a : List[Any] = pred_logits.size(0 ) _a : Dict = pred_logits.size(1 ) _a : List[str] = pred_logits.topk(1 , dim=-1 , largest=_a , sorted=_a ) _a : Optional[Any] = preds_index.view(-1 , _a )[:, 1:] _a : Tuple = decoder(_a ) _a : Tuple = torch.nn.functional.softmax(_a , dim=2 ).max(dim=2 ) _a : Any = preds_max_prob[:, 1:] for index in range(_a ): _a : 
str = preds_str[index].find(_a ) _a : List[str] = preds_str[index][:pred_eos] _a : str = preds_index[index].cpu().tolist() _a : Tuple = pred_index.index(_a ) if eos_token in pred_index else -1 _a : Optional[Any] = preds_max_prob[index][: pred_eos_index + 1] _a : Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_a ) conf_scores.append(_a ) return dec_strs, conf_scores def __lowercase ( self , _a ) -> Union[str, Any]: _a : str = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_a )] return decode_strs def __lowercase ( self , _a ) -> Any: return self.bpe_tokenizer.batch_decode(_a ) def __lowercase ( self , _a ) -> Union[str, Any]: _a : Tuple = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_a )] return decode_strs
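# End-to-end usage sketch for the processor above, following the MGP-STR pattern:
# preprocess a text-crop image, run the recognizer, then fuse the char/BPE/wordpiece
# heads with batch_decode. The checkpoint name is real; the image URL is illustrative.
import requests
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

image = Image.open(requests.get("https://example.com/text_crop.png", stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
print(processor.batch_decode(outputs.logits)["generated_text"])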
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
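# Usage sketch for the text-generation pipeline above via the high-level factory.
# The checkpoint, prompt and generation kwargs are illustrative.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
for out in generator("Once upon a time", max_new_tokens=20, num_return_sequences=2, do_sample=True):
    print(out["generated_text"])

# return_full_text=False keeps only the newly generated continuation,
# mirroring the ReturnType handling in the postprocess step above.
print(generator("Once upon a time", max_new_tokens=20, return_full_text=False))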
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging a__ = logging.get_logger(__name__) a__ = '''▁''' a__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} a__ = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } a__ = { '''facebook/mbart-large-50-one-to-many-mmt''': 1024, } # fmt: off a__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Any = ["input_ids", "attention_mask"] UpperCAmelCase__ : List[int] = [] UpperCAmelCase__ : List[int] = [] def __init__( self , _a , _a=None , _a=None , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _a : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token _a : int = {} if sp_model_kwargs is None else sp_model_kwargs _a : List[Any] = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_a , tgt_lang=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) _a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_a ) ) _a : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _a : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _a : Any = 1 _a : int = len(self.sp_model ) _a : Optional[Any] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a ) } _a : str = {v: k for k, v in self.lang_code_to_id.items()} _a : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) _a : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _a : Dict = src_lang if src_lang is not None else '''en_XX''' _a : Optional[Any] = self.lang_code_to_id[self._src_lang] _a : List[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __lowercase ( self ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __lowercase ( self ) -> str: return self._src_lang @src_lang.setter def __lowercase ( self , _a ) -> None: _a : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Dict: _a : Tuple = self.__dict__.copy() _a : Optional[Any] = None return state def __setstate__( self , _a ) -> None: _a : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _a : Dict = {} _a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowercase ( self ) -> Dict: _a : Dict = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowercase ( self , _a ) -> List[str]: return self.sp_model.encode(_a , out_type=_a ) def __lowercase ( self , _a ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _a : List[Any] = self.sp_model.PieceToId(_a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowercase ( self , _a ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowercase ( self , _a ) -> Dict: _a : Any = [] _a : Optional[int] = '''''' _a : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token _a : int = True _a : List[Any] = [] else: current_sub_tokens.append(_a ) _a : Tuple = False out_string += self.sp_model.decode(_a ) return out_string.strip() def __lowercase ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Dict = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , '''wb''' ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return 
super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) _a : List[Any] = [1] * len(self.prefix_tokens ) _a : Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_a )) + suffix_ones return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones def __lowercase ( self , _a , _a = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowercase ( self , _a , _a , _a , _a , **_a ) -> Optional[Any]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _a : Optional[Any] = src_lang _a : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a ) _a : Optional[Any] = self.convert_tokens_to_ids(_a ) _a : Union[str, Any] = tgt_lang_id return inputs def __lowercase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ) -> BatchEncoding: _a : Tuple = src_lang _a : Dict = tgt_lang return super().prepare_seqaseq_batch(_a , _a , **_a ) def __lowercase ( self ) -> int: return self.set_src_lang_special_tokens(self.src_lang ) def __lowercase ( self ) -> Optional[int]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowercase ( self , _a ) -> None: _a : int = self.lang_code_to_id[src_lang] _a : List[str] = [self.cur_lang_code_id] _a : List[str] = [self.eos_token_id] def __lowercase ( self , _a ) -> None: _a : List[str] = self.lang_code_to_id[tgt_lang] _a : str = [self.cur_lang_code_id] _a : Any = [self.eos_token_id]
code_codestyle: 369
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    # Resolve the bundled test script relative to this file
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
style_context_codestyle: 15
label: 0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
code_codestyle: 370
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = tempfile.mkdtemp() # fmt: off _a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _a : str = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __lowercase ( self , **_a ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> str: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self ) -> str: _a : List[str] = self.get_tokenizer() _a : Tuple = self.get_image_processor() _a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Dict: _a : List[str] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Any: _a : Dict = self.get_image_processor() _a : str = 
self.get_tokenizer() _a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[str] = self.prepare_image_inputs() _a : List[Any] = image_processor(_a , return_tensors='''np''' ) _a : Dict = processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.get_image_processor() _a : Dict = self.get_tokenizer() _a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Tuple = '''lower newer''' _a : int = processor(text=_a ) _a : str = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ) -> List[Any]: _a : Any = self.get_image_processor() _a : str = self.get_tokenizer() _a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[Any] = '''lower newer''' _a : Union[str, Any] = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : int = processor.batch_decode(_a ) _a : int = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __lowercase ( self ) -> List[Any]: _a : Tuple = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Optional[int] = '''lower newer''' _a : Dict = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
style_context_codestyle: 15
label: 0
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean a__ = 0 a__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right a__ = tuple[int, int] class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a , _a , _a , _a , _a , ) -> None: _a : Any = pos_x _a : List[str] = pos_y _a : Tuple = (pos_y, pos_x) _a : List[str] = goal_x _a : Union[str, Any] = goal_y _a : Optional[Any] = g_cost _a : Union[str, Any] = parent _a : List[Any] = self.calculate_heuristic() _a : int = self.g_cost + self.h_cost def __lowercase ( self ) -> float: _a : Optional[int] = self.pos_x - self.goal_x _a : str = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_a ) + abs(_a ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , _a ) -> bool: return self.f_cost < other.f_cost class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a ) -> Dict: _a : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a ) _a : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , _a ) _a : str = [self.start] _a : list[Node] = [] _a : int = False def __lowercase ( self ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() _a : Any = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_a ) self.closed_nodes.append(_a ) _a : Dict = self.get_successors(_a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_a ) else: # retrieve the best current path _a : List[Any] = self.open_nodes.pop(self.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_a ) else: self.open_nodes.append(_a ) return [self.start.pos] def __lowercase ( self , _a ) -> list[Node]: _a : Tuple = [] for action in delta: _a : Optional[int] = parent.pos_x + action[1] _a : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) ) return successors def __lowercase ( self , _a ) -> list[TPosition]: _a : str = node _a : int = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _a : Any = current_node.parent path.reverse() return path class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a ) -> None: _a : Union[str, Any] = AStar(_a , _a ) _a : List[Any] = AStar(_a , _a ) _a : Any = False def __lowercase ( self ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() _a : Dict = self.fwd_astar.open_nodes.pop(0 ) _a : Any = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _a , _a ) self.fwd_astar.closed_nodes.append(_a ) self.bwd_astar.closed_nodes.append(_a ) _a : Dict = current_bwd_node _a : Any = current_fwd_node _a : Tuple = { self.fwd_astar: self.fwd_astar.get_successors(_a ), self.bwd_astar: self.bwd_astar.get_successors(_a ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node 
in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_a ) else: # retrieve the best current path _a : Tuple = astar.open_nodes.pop( astar.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_a ) else: astar.open_nodes.append(_a ) return [self.fwd_astar.start.pos] def __lowercase ( self , _a , _a ) -> list[TPosition]: _a : List[str] = self.fwd_astar.retrace_path(_a ) _a : int = self.bwd_astar.retrace_path(_a ) bwd_path.pop() bwd_path.reverse() _a : Dict = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] a__ = (0, 0) a__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) a__ = time.time() a__ = AStar(init, goal) a__ = a_star.search() a__ = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') a__ = time.time() a__ = BidirectionalAStar(init, goal) a__ = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
code_codestyle: 371
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]: """simple docstring""" for attribute in key.split('''.''' ): _a : Optional[Any] = getattr(__a ,__a ) if weight_type is not None: _a : Dict = getattr(__a ,__a ).shape else: _a : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : List[Any] = value elif weight_type == "weight_g": _a : Any = value elif weight_type == "weight_v": _a : Union[str, Any] = value elif weight_type == "bias": _a : Optional[int] = value else: _a : List[Any] = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int: """simple docstring""" _a : Union[str, Any] = [] _a : Union[str, Any] = fairseq_model.state_dict() _a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _a : int = False if "conv_layers" in name: load_conv_layer( __a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,) _a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): _a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): _a : Any = True if "*" in mapped_key: _a : Optional[int] = name.split(__a )[0].split('''.''' )[-2] _a : Any = mapped_key.replace('''*''' ,__a ) if "weight_g" in name: _a : List[Any] = '''weight_g''' elif "weight_v" in name: _a : List[str] = '''weight_v''' elif "weight" in name: _a : Any = '''weight''' elif "bias" in name: _a : str = '''bias''' else: _a : Any = None set_recursively(__a ,__a ,__a ,__a ,__a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" _a : int = full_name.split('''conv_layers.''' )[-1] _a : Any = name.split('''.''' ) _a : List[Any] = int(items[0] ) _a : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _a : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _a : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _a : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _a : Any = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]: """simple docstring""" if config_path is not None: _a : Tuple = HubertConfig.from_pretrained(__a ) else: _a : Any = HubertConfig() if is_finetuned: if dict_path: _a : Tuple = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a : Any = target_dict.pad_index _a : Tuple = target_dict.bos_index _a : Optional[int] = target_dict.eos_index _a : Optional[Any] = len(target_dict.symbols ) _a : Tuple = os.path.join(__a ,'''vocab.json''' ) if not os.path.isdir(__a ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) ) return os.makedirs(__a ,exist_ok=__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices ,__a ) _a : Tuple = WavaVecaCTCTokenizer( __a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,) _a : Tuple = True if config.feat_extract_norm == '''layer''' else False _a : List[Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,) _a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a ) processor.save_pretrained(__a ) _a : Tuple = HubertForCTC(__a ) else: _a : Tuple = HubertModel(__a ) if is_finetuned: _a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _a : Any = model[0].eval() recursively_load_weights(__a ,__a ,__a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
style_context_codestyle: 15
label: 0
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # Relax every pair (i, j) through each intermediate node k
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
code_codestyle: 350
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"] UpperCAmelCase__ : str = "ViltImageProcessor" UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self , _a=None , _a=None , **_a ) -> Any: _a : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _a , ) _a : Dict = kwargs.pop('''feature_extractor''' ) _a : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_a , _a ) _a : int = self.image_processor def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding: _a : Tuple = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask _a : str = self.image_processor(_a , return_tensors=_a ) encoding.update(_a ) return encoding def __lowercase ( self , *_a , **_a ) -> Optional[Any]: return self.tokenizer.batch_decode(*_a , **_a ) def __lowercase ( self , *_a , **_a ) -> str: return self.tokenizer.decode(*_a , **_a ) @property def __lowercase ( self ) -> Optional[int]: _a : str = self.tokenizer.model_input_names _a : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __lowercase ( self ) -> Optional[Any]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , ) return self.image_processor_class @property def __lowercase ( self ) -> Any: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , ) return self.image_processor
style_context_codestyle: 15
label: 0
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
code_codestyle: 351
from math import ceil


def solution(n: int = 1_001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n spiral grid."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of ring i sum to 4*odd^2 - 6*even
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
style_context_codestyle: 15
label: 0
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __UpperCAmelCase ( __a : Tuple ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _a : Dict = AutoConfig.from_pretrained(__a ) _a : Union[str, Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__a ) _a : Optional[Any] = checkpoints.load_tax_checkpoint(__a ) _a : Tuple = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": _a : Optional[int] = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": _a : int = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a : Optional[Any] = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): _a : Optional[int] = F"""layers_{str(__a )}""" # Self-Attention _a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] _a : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] _a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] _a : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization _a : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: _a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _a : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _a : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _a : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _a : str = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _a : List[Any] = flax_model.params['''encoder''']['''block'''][str(__a )]['''layer'''] _a : Union[str, Any] = tax_attention_key _a : int = tax_attention_out _a : str = tax_attention_query _a : Optional[Any] = tax_attention_value _a : Union[str, Any] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a : Dict = tax_global_layer_norm if split_mlp_wi: _a : Any = tax_mlp_wi_a _a : int = tax_mlp_wi_a else: _a : Dict = tax_mlp_wi _a : int = tax_mlp_wo _a : Tuple = tax_mlp_layer_norm _a : List[Any] = flax_model_encoder_layer_block # Only for layer 0: _a : Tuple = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T _a : Tuple = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _a : int = 
tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T _a : Optional[int] = tax_encoder_global_rel_embedding # Assigning _a : List[Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] _a : Optional[Any] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _a : str = F"""layers_{str(__a )}""" # Self-Attention _a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] _a : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] _a : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] _a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization _a : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention _a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] _a : Optional[int] = tax_enc_dec_attention_module['''key''']['''kernel'''] _a : str = tax_enc_dec_attention_module['''out''']['''kernel'''] _a : List[Any] = tax_enc_dec_attention_module['''query''']['''kernel'''] _a : str = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization _a : Any = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: _a : str = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _a : Optional[Any] = flax_model.params['''decoder''']['''block'''][str(__a )]['''layer'''] _a : List[Any] = tax_attention_key _a : Optional[int] = tax_attention_out _a : str = tax_attention_query _a : Tuple = tax_attention_value _a : List[str] = tax_pre_attention_layer_norm _a : Any = tax_enc_dec_attention_key _a : Dict = tax_enc_dec_attention_out _a : Optional[Any] = tax_enc_dec_attention_query _a : List[str] = tax_enc_dec_attention_value _a : Dict = tax_cross_layer_norm if split_mlp_wi: _a : Optional[int] = tax_mlp_wi_a _a : Dict = tax_mlp_wi_a else: _a : Optional[Any] = tax_mlp_wi _a : Any = tax_mlp_wo _a : Optional[Any] = txa_mlp_layer_norm _a : Union[str, Any] = flax_model_decoder_layer_block # Decoder Normalization _a : Tuple = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] _a : str = txa_decoder_norm # Only for layer 0: _a : Dict = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T _a : str = tax_decoder_rel_embedding # Token Embeddings _a : Optional[Any] = tax_model['''target''']['''token_embedder''']['''embedding'''] _a : Any = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _a : Any = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__a ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": a__ 
= argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) a__ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
code_codestyle: 352
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]: """simple docstring""" _a : str = to_pil_image(__a ) _a , _a : Optional[Any] = pil_image.size _a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a ) _a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()] _a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] _a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a : int = [] for x, y, w, h in zip(__a ,__a ,__a ,__a ): _a : List[str] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes _a : Dict = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a ,__a ,__a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None: super().__init__(**_a ) _a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Union[str, Any] = get_size_dict(_a ) _a : int = do_resize _a : Optional[int] = size _a : str = resample _a : str = do_rescale _a : Any = rescale_value _a : Optional[Any] = do_normalize _a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD _a : List[Any] = apply_ocr _a : Optional[int] = ocr_lang _a : Tuple = tesseract_config def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray: _a : Any = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[int] = (size['''height'''], size['''width''']) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : Any = get_size_dict(_a ) _a : List[str] = resample if resample is not None else self.resample _a : int = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : int = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr _a : int = ocr_lang if ocr_lang is not None else self.ocr_lang _a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. _a : Any = [to_numpy_array(_a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a : str = [] _a : str = [] for image in images: _a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a ) words_batch.append(_a ) boxes_batch.append(_a ) if do_resize: _a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a ) if apply_ocr: _a : Optional[int] = words_batch _a : List[Any] = boxes_batch return data
style_context_codestyle: 15
label: 0
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path a__ = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def __UpperCAmelCase ( __a : Optional[int]=True ) -> Optional[int]: """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : int = None UpperCAmelCase__ : int = None def __lowercase ( self , _a , _a ) -> Optional[Any]: with TemporaryDirectory() as tmp_dir: _a : Union[str, Any] = dataset_module_factory(_a , cache_dir=_a ) _a : Tuple = import_main_class(dataset_module.module_path , dataset=_a ) _a : DatasetBuilder = builder_cls( cache_dir=_a , config_name=_a , hash=dataset_module.hash , ) _a : Union[str, Any] = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=_a ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) _a : Dict = cached_path(_a , cache_dir=_a ) self.assertTrue(os.path.exists(_a ) ) @pytest.mark.integration def __UpperCAmelCase ( __a : List[str] ) -> int: """simple docstring""" _a : Union[str, Any] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' _a : str = dataset_module_factory('''wikipedia''' ,cache_dir=__a ) _a : Dict = import_main_class(dataset_module.module_path ) _a : DatasetBuilder = builder_cls( cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _a : Union[str, Any] = None builder_instance.download_and_prepare() _a : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCAmelCase ( __a : Optional[Any] ) -> Dict: """simple docstring""" _a : Optional[Any] = dataset_module_factory('''wikipedia''' ,cache_dir=__a ) _a : Any = import_main_class(dataset_module.module_path ,dataset=__a ) _a : 
DatasetBuilder = builder_cls( cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,) _a : Dict = builder_instance.as_streaming_dataset() assert ds assert isinstance(__a ,__a ) assert "train" in ds assert isinstance(ds['''train'''] ,__a ) assert next(iter(ds['''train'''] ) )
code_codestyle: 353
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
style_context_codestyle: 15
label: 0
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a__ = logging.get_logger(__name__) a__ = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) a__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __UpperCAmelCase ( __a : str ) -> Union[str, Any]: """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _a : List[Any] = model_type_to_module_name(__a ) _a : Tuple = importlib.import_module(F""".{module_name}""" ,'''transformers.models''' ) try: return getattr(__a ,__a ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(__a ,'''__name__''' ,__a ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _a : Optional[int] = importlib.import_module('''transformers''' ) if hasattr(__a ,__a ): return getattr(__a ,__a ) return None def __UpperCAmelCase ( __a : Union[str, os.PathLike] ,__a : Optional[Union[str, os.PathLike]] = None ,__a : bool = False ,__a : bool = False ,__a : Optional[Dict[str, str]] = None ,__a : Optional[Union[bool, str]] = None ,__a : Optional[str] = None ,__a : bool = False ,**__a : Dict ,) -> Tuple: """simple docstring""" _a : Optional[int] = get_file_from_repo( __a ,__a ,cache_dir=__a ,force_download=__a ,resume_download=__a ,proxies=__a ,use_auth_token=__a ,revision=__a ,local_files_only=__a ,) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(__a ,encoding='''utf-8''' ) as reader: return json.load(__a ) class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> List[Any]: raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(_a ) def __lowercase ( cls , _a , **_a ) -> Optional[Any]: _a : Optional[Any] = kwargs.pop('''config''' , _a ) _a : Optional[Any] = kwargs.pop('''trust_remote_code''' , _a ) _a : Any = True _a : Any = ImageProcessingMixin.get_image_processor_dict(_a , **_a ) _a : Optional[int] = config_dict.get('''image_processor_type''' , _a ) _a : Union[str, Any] = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): _a : Optional[Any] = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _a : int = config_dict.pop('''feature_extractor_type''' , _a ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. 
Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) _a : str = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): _a : int = config_dict['''auto_map''']['''AutoFeatureExtractor'''] _a : Optional[int] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(_a , _a ): _a : List[Any] = AutoConfig.from_pretrained(_a , **_a ) # It could be in `config.image_processor_type`` _a : Dict = getattr(_a , '''image_processor_type''' , _a ) if hasattr(_a , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: _a : Optional[Any] = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: _a : List[Any] = image_processor_class_from_name(_a ) _a : int = image_processor_auto_map is not None _a : Optional[int] = image_processor_class is not None or type(_a ) in IMAGE_PROCESSOR_MAPPING _a : Any = resolve_trust_remote_code( _a , _a , _a , _a ) if has_remote_code and trust_remote_code: _a : Any = get_class_from_dynamic_module( _a , _a , **_a ) _a : Dict = kwargs.pop('''code_revision''' , _a ) if os.path.isdir(_a ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(_a , **_a ) elif image_processor_class is not None: return image_processor_class.from_dict(_a , **_a ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(_a ) in IMAGE_PROCESSOR_MAPPING: _a : str = IMAGE_PROCESSOR_MAPPING[type(_a )] return image_processor_class.from_dict(_a , **_a ) raise ValueError( F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """ F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def __lowercase ( _a , _a ) -> Dict: IMAGE_PROCESSOR_MAPPING.register(_a , _a )
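For orientation, a minimal usage sketch of the auto class defined above, written against the public transformers API. The checkpoint name is only an illustrative example, and Pillow is assumed to be installed.

# Hedged usage sketch for AutoImageProcessor; the checkpoint below is an example,
# not something this file prescribes.
from PIL import Image

from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# A blank RGB image stands in for real input data.
inputs = image_processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(inputs["pixel_values"].shape)  # typically torch.Size([1, 3, 224, 224])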
354
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

a__ = random.Random()


def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
    """simple docstring"""
    if rng is None:
        _a : Dict = global_rng
    _a : Optional[Any] = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
        _a : Optional[Any] = parent
        _a : str = batch_size
        _a : List[str] = min_seq_length
        _a : str = max_seq_length
        _a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        _a : List[Any] = spectrogram_length
        _a : List[str] = feature_size
        _a : List[Any] = num_audio_channels
        _a : Tuple = hop_length
        _a : Optional[int] = chunk_length
        _a : int = sampling_rate

    def __lowercase ( self ) -> Union[str, Any]:
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
        def _flatten(_a ):
            return list(itertools.chain(*_a ) )

        if equal_length:
            _a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            _a : List[Any] = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            _a : str = [np.asarray(_a ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase__ : List[Any] = TvltFeatureExtractor

    def __lowercase ( self ) -> Dict:
        _a : List[str] = TvltFeatureExtractionTester(self )

    def __lowercase ( self ) -> Any:
        _a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
        self.assertTrue(hasattr(_a , '''feature_size''' ) )
        self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
        self.assertTrue(hasattr(_a , '''hop_length''' ) )
        self.assertTrue(hasattr(_a , '''chunk_length''' ) )
        self.assertTrue(hasattr(_a , '''sampling_rate''' ) )

    def __lowercase ( self ) -> Optional[int]:
        _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : int = feat_extract_first.save_pretrained(_a )[0]
            check_json_file_has_correct_format(_a )
            _a : Dict = self.feature_extraction_class.from_pretrained(_a )

        _a : List[Any] = feat_extract_first.to_dict()
        _a : Union[str, Any] = feat_extract_second.to_dict()
        _a : Any = dict_first.pop('''mel_filters''' )
        _a : int = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertEqual(_a , _a )

    def __lowercase ( self ) -> Optional[int]:
        _a : Any = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
            feat_extract_first.to_json_file(_a )
            _a : List[str] = self.feature_extraction_class.from_json_file(_a )

        _a : List[Any] = feat_extract_first.to_dict()
        _a : Dict = feat_extract_second.to_dict()
        _a : str = dict_first.pop('''mel_filters''' )
        _a : str = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(_a , _a ) )
        self.assertEqual(_a , _a )

    def __lowercase ( self ) -> Union[str, Any]:
        # Initialize feature_extractor
        _a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        _a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]

        # Test not batched input
        _a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test batched
        _a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test audio masking
        _a : Union[str, Any] = feature_extractor(
            _a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test 2-D numpy arrays are batched.
        _a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        _a : int = np.asarray(_a )
        _a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

    def __lowercase ( self , _a ) -> Optional[Any]:
        _a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        _a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]

    def __lowercase ( self ) -> int:
        _a : Union[str, Any] = self._load_datasamples(1 )
        _a : int = TvltFeatureExtractor()
        _a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values

        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )

        _a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
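A short, hedged sketch of how the feature extractor under test is driven; the random waveform is a stand-in for real 44.1 kHz mono audio.

# Hedged sketch against the public TvltFeatureExtractor API.
import numpy as np

from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)  # one second of fake audio
features = feature_extractor(waveform, sampling_rate=44100, return_tensors="np")
# audio_values is 4-D: (batch, num_audio_channels, time, feature_size), as the tests assert.
print(features["audio_values"].shape)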
15
0
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

a__ = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}


def __UpperCAmelCase ( __a : str = "dhaka" ,__a : int = 5 ) -> int:
    """simple docstring"""
    _a : Optional[Any] = min(__a ,50 )  # Prevent abuse!
    _a : str = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }

    _a : Dict = requests.get('''https://www.google.com/search''' ,params=__a ,headers=__a )
    _a : List[str] = BeautifulSoup(html.text ,'''html.parser''' )
    _a : Union[str, Any] = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''' ,str(soup.select('''script''' ) ) ) )

    _a : Optional[Any] = json.dumps(__a )
    _a : Tuple = json.loads(__a )

    _a : List[str] = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' ,__a ,)
    if not matched_google_image_data:
        return 0

    _a : Dict = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' ,'''''' ,str(__a ) ,)

    _a : List[str] = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' ,__a ,)
    for index, fixed_full_res_image in enumerate(__a ):
        if index >= max_images:
            return index
        _a : List[Any] = bytes(__a ,'''ascii''' ).decode(
            '''unicode-escape''' )
        _a : Optional[Any] = bytes(__a ,'''ascii''' ).decode(
            '''unicode-escape''' )
        _a : Any = urllib.request.build_opener()
        _a : List[str] = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(__a )
        _a : Any = F"""query_{query.replace(' ' ,'_' )}"""
        if not os.path.exists(__a ):
            os.makedirs(__a )
        urllib.request.urlretrieve(  # noqa: S310
            __a ,F"""{path_name}/original_size_img_{index}.jpg""" )
    return index


if __name__ == "__main__":
    try:
        a__ = download_images_from_google_query(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print('''Please provide a search term.''')
        raise
355
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a , _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this 
array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
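The pipeline implemented above is normally reached through the high-level pipeline factory; a hedged sketch follows, with the model name only as an example checkpoint.

# Hedged usage sketch; distilroberta-base uses the <mask> token assumed here.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
# top_k and targets map to the postprocess parameters handled above.
preds = fill_mask("Paris is the <mask> of France.", top_k=3)
for pred in preds:
    print(pred["token_str"], round(pred["score"], 3))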
15
0
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class UpperCAmelCase_ ( __lowercase ):
    """simple docstring"""

    UpperCAmelCase__ : Tuple = "EncodecFeatureExtractor"
    UpperCAmelCase__ : str = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , _a , _a ) -> Dict:
        super().__init__(_a , _a )
        _a : int = self.feature_extractor
        _a : Optional[int] = False

    def __lowercase ( self , _a=None , _a=None , _a=True ) -> List[str]:
        return self.tokenizer.get_decoder_prompt_ids(task=_a , language=_a , no_timestamps=_a )

    def __call__( self , *_a , **_a ) -> int:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_a , **_a )

        _a : Any = kwargs.pop('''audio''' , _a )
        _a : Tuple = kwargs.pop('''sampling_rate''' , _a )
        _a : List[Any] = kwargs.pop('''text''' , _a )
        if len(_a ) > 0:
            _a : Any = args[0]
            _a : Tuple = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )

        if text is not None:
            _a : Any = self.tokenizer(_a , **_a )
        if audio is not None:
            _a : Tuple = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            _a : Any = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                _a : List[Any] = audio_inputs['''padding_mask''']
            return inputs

    def __lowercase ( self , *_a , **_a ) -> Optional[int]:
        _a : Tuple = kwargs.pop('''audio''' , _a )
        _a : List[Any] = kwargs.pop('''padding_mask''' , _a )

        if len(_a ) > 0:
            _a : List[str] = args[0]
            _a : Optional[Any] = args[1:]

        if audio_values is not None:
            return self._decode_audio(_a , padding_mask=_a )
        else:
            return self.tokenizer.batch_decode(*_a , **_a )

    def __lowercase ( self , *_a , **_a ) -> Any:
        return self.tokenizer.decode(*_a , **_a )

    def __lowercase ( self , _a , _a = None ) -> List[np.ndarray]:
        _a : Any = to_numpy(_a )
        _a : Optional[int] = audio_values.shape

        if padding_mask is None:
            return list(_a )

        _a : List[str] = to_numpy(_a )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        _a : Optional[Any] = seq_len - padding_mask.shape[-1]
        _a : Dict = 1 - self.feature_extractor.padding_value
        _a : Dict = np.pad(_a , ((0, 0), (0, difference)) , '''constant''' , constant_values=_a )

        _a : Optional[Any] = audio_values.tolist()
        for i in range(_a ):
            _a : Any = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            _a : Optional[int] = sliced_audio.reshape(_a , -1 )

        return audio_values
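In the released library this processor pairs an EncodecFeatureExtractor with a T5 tokenizer; it appears to correspond to MusicgenProcessor upstream, though that identification is an inference. A hedged sketch of the call path shown above, with an example checkpoint:

# Hedged sketch; MusicgenProcessor and the checkpoint name are assumptions
# based on the feature extractor / tokenizer pairing above.
import numpy as np

from transformers import MusicgenProcessor

processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")  # example checkpoint
audio = np.zeros(32000, dtype=np.float32)  # one second of silence at 32 kHz
inputs = processor(text=["80s pop with heavy bass"], audio=audio, sampling_rate=32000, return_tensors="pt")
print(sorted(inputs.keys()))  # tokenizer fields plus input_values/padding_mask from the feature extractor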
356
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


a__ = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        '''text-classification''',
        '''language-modeling''',
        '''summarization''',
        '''token-classification''',
        '''question-answering''',
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

a__ = logging.getLogger()


def __UpperCAmelCase ( ) -> Optional[int]:
    """simple docstring"""
    _a : Any = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    _a : Dict = parser.parse_args()
    return args.f


def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
    """simple docstring"""
    _a : Any = os.path.join(__a ,F"""{split}_results.json""" )
    if os.path.exists(__a ):
        with open(__a ,'''r''' ) as f:
            return json.load(__a )
    raise ValueError(F"""can't find {path}""" )


a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class UpperCAmelCase_ ( __lowercase ):
    """simple docstring"""

    def __lowercase ( self ) -> str:
        _a : Any = self.get_auto_remove_tmp_dir()
        _a : Optional[Any] = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_flax_glue.main()
            _a : Any = get_results(_a )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )

    @slow
    def __lowercase ( self ) -> Dict:
        _a : Tuple = self.get_auto_remove_tmp_dir()
        _a : Tuple = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_clm_flax.main()
            _a : List[str] = get_results(_a )
            self.assertLess(result['''eval_perplexity'''] , 1_0_0 )

    @slow
    def __lowercase ( self ) -> Optional[int]:
        _a : str = self.get_auto_remove_tmp_dir()
        _a : Optional[int] = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_summarization_flax.main()
            _a : Optional[int] = get_results(_a , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def __lowercase ( self ) -> Tuple:
        _a : List[str] = self.get_auto_remove_tmp_dir()
        _a : List[Any] = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_mlm_flax.main()
            _a : List[Any] = get_results(_a )
            self.assertLess(result['''eval_perplexity'''] , 4_2 )

    @slow
    def __lowercase ( self ) -> Dict:
        _a : Optional[Any] = self.get_auto_remove_tmp_dir()
        _a : int = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_t5_mlm_flax.main()
            _a : List[Any] = get_results(_a )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )

    @slow
    def __lowercase ( self ) -> Optional[Any]:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        _a : Any = 7 if get_gpu_count() > 1 else 2

        _a : List[Any] = self.get_auto_remove_tmp_dir()
        _a : List[Any] = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_flax_ner.main()
            _a : Dict = get_results(_a )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def __lowercase ( self ) -> Any:
        _a : Optional[int] = self.get_auto_remove_tmp_dir()
        _a : Union[str, Any] = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(_a , '''argv''' , _a ):
            run_qa.main()
            _a : Any = get_results(_a )
            self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
            self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
15
0
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = LongformerTokenizer UpperCAmelCase__ : Optional[int] = True UpperCAmelCase__ : List[str] = LongformerTokenizerFast UpperCAmelCase__ : Any = True def __lowercase ( self ) -> int: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) ) _a : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a : List[Any] = {'''unk_token''': '''<unk>'''} _a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_a ) ) def __lowercase ( self , **_a ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , _a ) -> Union[str, Any]: _a : Optional[Any] = '''lower newer''' _a : Optional[int] = '''lower newer''' return input_text, output_text def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _a : Any = '''lower newer''' _a : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _a : Optional[Any] = tokenizer.tokenize(_a ) # , add_prefix_space=True) self.assertListEqual(_a , _a ) _a : Dict = tokens + [tokenizer.unk_token] _a : Union[str, Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def __lowercase ( self ) -> int: _a : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=_a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def __lowercase ( self ) -> List[str]: _a : Optional[Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) _a : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_a ) _a : Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a ) _a : List[str] = tokenizer.encode( '''sequence builders''' , add_special_tokens=_a , add_prefix_space=_a ) _a : Optional[int] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_a , add_prefix_space=_a ) _a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a ) _a : Optional[int] = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __lowercase ( self ) -> Optional[int]: _a : Dict = self.get_tokenizer() _a : Any = '''Encode this sequence.''' _a : Tuple = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments _a : Any = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_a , _a ) _a : str = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) _a : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_a , _a ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) _a : List[str] = tokenizer.encode(_a , add_special_tokens=_a ) _a : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_a , _a ) # Testing spaces after special tokens _a : str = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space _a : Optional[int] = tokenizer.convert_tokens_to_ids(_a ) _a : Optional[Any] = '''Encode <mask> sequence''' _a : Tuple = '''Encode <mask>sequence''' _a : Optional[Any] = tokenizer.encode(_a ) _a : Optional[Any] = encoded.index(_a ) _a : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_a , _a ) _a : Optional[Any] = tokenizer.encode(_a ) _a : int = encoded.index(_a ) _a : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_a , _a ) def __lowercase ( self ) -> Any: pass def __lowercase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a ) _a : Union[str, Any] = self.tokenizer_class.from_pretrained(_a , **_a ) _a : List[Any] = '''A, <mask> AllenNLP sentence.''' _a : str = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) _a : List[str] = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly 
handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( _a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( _a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def __lowercase ( self ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _a : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _a : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _a ) self.assertEqual(post_processor_state['''add_prefix_space'''] , _a ) self.assertEqual(post_processor_state['''trim_offsets'''] , _a ) def __lowercase ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : List[str] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` _a : List[str] = F"""{text_of_1_token} {text_of_1_token}""" _a : List[str] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _a : Tuple = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : int = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : Union[str, Any] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) _a : List[str] = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + 
len(text_of_1_token)), # ) _a : str = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , ) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , ) _a : List[str] = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) _a : str = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
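A hedged sketch of the offset-mapping behaviour these tests pin down, against the public fast-tokenizer API (the checkpoint name is an example):

# Hedged sketch; requires the `tokenizers` backend that ships with transformers.
from transformers import LongformerTokenizerFast

tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
encoding = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# With trim_offsets=True (the default), each token's offsets exclude the leading space.
print(encoding["offset_mapping"])  # e.g. [(0, 5), (6, 11)]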
357
import argparse
import os
import re

import packaging.version


a__ = '''examples/'''
a__ = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
a__ = '''README.md'''


def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
    """simple docstring"""
    with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        _a : Tuple = f.read()
    _a , _a : str = REPLACE_PATTERNS[pattern]
    _a : List[str] = replace.replace('''VERSION''' ,__a )
    _a : List[Any] = re_pattern.sub(__a ,__a )
    with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        f.write(__a )


def __UpperCAmelCase ( __a : Any ) -> List[Any]:
    """simple docstring"""
    for folder, directories, fnames in os.walk(__a ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )


def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__a ,__a ,__a )
    if not patch:
        update_version_in_examples(__a )


def __UpperCAmelCase ( ) -> List[str]:
    """simple docstring"""
    _a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
    _a : str = '''1. Want to contribute a new model?'''
    with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        _a : Optional[int] = f.readlines()

    # Find the start of the list.
    _a : Optional[int] = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1

    _a : List[Any] = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            _a : Tuple = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' ,
                '''https://huggingface.co/docs/transformers/model_doc''' ,)
        index += 1

    with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        f.writelines(__a )


def __UpperCAmelCase ( ) -> List[str]:
    """simple docstring"""
    with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
        _a : Optional[Any] = f.read()
    _a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
    return packaging.version.parse(__a )


def __UpperCAmelCase ( __a : Dict=False ) -> str:
    """simple docstring"""
    _a : Optional[Any] = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        _a : List[Any] = default_version.base_version
    elif patch:
        _a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        _a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""

    # Now let's ask nicely if that's the right one.
    _a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
    if len(__a ) == 0:
        _a : int = default_version

    print(F"""Updating version to {version}.""" )
    global_version_update(__a ,patch=__a )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()


def __UpperCAmelCase ( ) -> Tuple:
    """simple docstring"""
    _a : str = get_version()
    _a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    _a : List[Any] = current_version.base_version

    # Check with the user we got that right.
    _a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
    if len(__a ) == 0:
        _a : List[str] = dev_version

    print(F"""Updating version to {version}.""" )
    global_version_update(__a )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    a__ = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    a__ = parser.parse_args()

    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
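The branching in the release helpers above reduces to simple packaging.version arithmetic; a small worked sketch:

# Worked example of the version-bump logic, using only the packaging library.
import packaging.version

version = packaging.version.parse("4.21.0.dev0")
print(version.is_devrelease)                                   # True
print(version.base_version)                                    # "4.21.0" -> used for the release
print(f"{version.major}.{version.minor}.{version.micro + 1}")  # "4.21.1" -> patch bump
print(f"{version.major}.{version.minor + 1}.0.dev0")           # "4.22.0.dev0" -> next dev cycle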
15
0
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=9_9 , _a=1_6 , _a=3_6 , _a=6 , _a=6 , _a=6 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]: _a : Union[str, Any] = parent _a : int = batch_size _a : Dict = seq_length _a : Union[str, Any] = is_training _a : Union[str, Any] = use_input_mask _a : int = use_token_type_ids _a : str = use_labels _a : int = vocab_size _a : Optional[Any] = embedding_size _a : str = hidden_size _a : Optional[int] = num_hidden_layers _a : str = num_hidden_groups _a : List[Any] = num_attention_heads _a : List[Any] = intermediate_size _a : List[str] = hidden_act _a : int = hidden_dropout_prob _a : Optional[int] = attention_probs_dropout_prob _a : List[str] = max_position_embeddings _a : List[Any] = type_vocab_size _a : List[str] = type_sequence_label_size _a : Optional[Any] = initializer_range _a : Tuple = num_labels _a : str = num_choices _a : Tuple = scope def __lowercase ( self ) -> Optional[Any]: _a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Optional[Any] = None if self.use_input_mask: _a : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _a : List[Any] = None if self.use_token_type_ids: _a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : List[str] = None _a : Tuple = None _a : Any = None if self.use_labels: _a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) _a : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self ) -> Tuple: return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict: _a : Dict = AlbertModel(config=_a ) model.to(_a ) model.eval() _a : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a ) _a : Union[str, Any] = model(_a , token_type_ids=_a ) _a : str = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: _a : Any = AlbertForPreTraining(config=_a ) model.to(_a ) model.eval() _a : Dict = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , sentence_order_label=_a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple: _a : Dict = AlbertForMaskedLM(config=_a ) model.to(_a ) model.eval() _a : Tuple = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Optional[Any] = AlbertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _a : Tuple = model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> str: _a : Tuple = self.num_labels _a : Dict = AlbertForSequenceClassification(_a ) model.to(_a ) model.eval() _a : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: _a : Optional[int] = self.num_labels _a : Union[str, Any] = AlbertForTokenClassification(config=_a ) model.to(_a ) model.eval() _a : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Dict = self.num_choices _a : int = AlbertForMultipleChoice(config=_a ) model.to(_a ) model.eval() _a : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _a : Tuple = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowercase ( self ) -> Union[str, Any]: _a : List[str] = self.prepare_config_and_inputs() ( _a ) : Optional[int] = config_and_inputs _a : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase__ : Optional[int] = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": 
AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Dict = True def __lowercase ( self , _a , _a , _a=False ) -> List[str]: _a : int = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class in get_values(_a ): _a : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a ) _a : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a ) return inputs_dict def __lowercase ( self ) -> str: _a : Tuple = AlbertModelTester(self ) _a : int = ConfigTester(self , config_class=_a , hidden_size=3_7 ) def __lowercase ( self ) -> Dict: self.config_tester.run_common_tests() def __lowercase ( self ) -> Any: _a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __lowercase ( self ) -> str: _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_a ) def __lowercase ( self ) -> int: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def __lowercase ( self ) -> Optional[int]: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def __lowercase ( self ) -> Union[str, Any]: _a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def __lowercase ( self ) -> Any: _a : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _a : Any = type self.model_tester.create_and_check_model(*_a ) @slow def __lowercase ( self ) -> Dict: for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : List[str] = AlbertModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self ) -> int: _a : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' ) _a : int = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) _a : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _a : List[str] = model(_a , attention_mask=_a )[0] _a : List[str] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , _a ) _a : List[Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
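A hedged end-to-end sketch matching the integration test above (same checkpoint and input ids as the test; torch is required):

# Minimal inference sketch against the public AlbertModel API.
import torch

from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids).last_hidden_state
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])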
358
def __UpperCAmelCase ( __a : int ) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(__a ,__a ):
        return 0
    elif n == 2:
        return 1
    else:
        _a : Any = [0, 1]
        for i in range(2 ,n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )

        return sequence[n]


def __UpperCAmelCase ( __a : int ) -> int:
    """simple docstring"""
    _a : Any = 0
    _a : Dict = 2

    while digits < n:
        index += 1
        _a : Dict = len(str(fibonacci(__a ) ) )

    return index


def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
    """simple docstring"""
    return fibonacci_digits_index(__a )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
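A worked sanity check of the digit-count search: the first Fibonacci number with three digits is F(12) = 144, so an input of 3 should yield index 12. A standalone sketch of the same logic (helper name is ours, not the file's):

# Standalone iterative version for sanity-checking the search above.
def first_fib_index_with_n_digits(n: int) -> int:
    a, b, index = 1, 1, 2  # b tracks F(index)
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

print(first_fib_index_with_n_digits(3))  # 12, since F(12) = 144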
15
0
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a__ = logging.get_logger(__name__) a__ = {'''vocab_file''': '''spiece.model'''} a__ = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } a__ = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : str = VOCAB_FILES_NAMES UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self , _a , _a=False , _a=False , _a=False , _a=None , _a=None , _a=None , _a=None , _a = None , **_a , ) -> None: _a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs _a : Union[str, Any] = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) _a : Union[str, Any] = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _a : Any = '''<|endoftext|>''' if eos_token is None else eos_token _a : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _a : Union[str, Any] = unk_token if pad_token is None else pad_token _a : Tuple = eos_token if bos_token is None else bos_token else: _a : Optional[Any] = '''<pad>''' if pad_token is None else pad_token _a : Union[str, Any] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) _a : Optional[Any] = do_lower_case _a : Any = remove_space _a : List[Any] = keep_accents _a : Tuple = vocab_file _a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) # Used for whitespace normalization in input texts # fmt : off _a : Union[str, Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing _a : Any = re.compile( F"""[{''.join(map(_a , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" ) def __getstate__( self ) -> List[str]: _a : Optional[Any] = self.__dict__.copy() _a : Any = None return state def __setstate__( self , _a ) -> Dict: _a : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _a : Tuple = {} _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __lowercase ( self ) -> int: return len(self.sp_model ) def __lowercase ( self , _a ) -> str: _a : Optional[int] = self.non_printing_characters_re.sub('''''' , _a ) # Normalize whitespaces _a : Dict = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization _a : Optional[Any] = unicodedata.normalize('''NFC''' , _a ) return text def __lowercase ( self , _a , **_a ) -> List[str]: _a : Optional[Any] = self.preprocess_text(_a ) return self.sp_model.encode(_a , out_type=_a ) def __lowercase ( self , _a ) -> int: return self.sp_model.PieceToId(_a ) def __lowercase ( self , _a ) -> str: return self.sp_model.IdToPiece(_a ) @staticmethod def __lowercase ( _a ) -> str: return out_string def __lowercase ( self , _a ) -> str: _a : int = [] _a : Any = '''''' _a : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token _a : Tuple = True _a : List[str] = [] else: current_sub_tokens.append(_a ) _a : Any = False out_string += self.sp_model.decode(_a ) return out_string def __lowercase ( self ) -> Dict[str, int]: _a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowercase ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Any = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _a ) elif not os.path.isfile(self.vocab_file ): with open(_a , '''wb''' ) as fi: _a : Optional[int] = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def __lowercase ( self , _a , _a = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(_a , _a ): _a : Tuple = self.preprocess_text(_a ) _a : int = self.sp_model.encode(_a ) else: _a : Dict = [self.preprocess_text(_a ) for t in text] _a : Optional[int] = self.sp_model.encode(_a ) if return_tensors is True or return_tensors == "pt": _a : List[str] = torch.tensor(_a ) return token_ids def __lowercase ( self , _a ) -> str: return self.sp_model.decode(_a ) def __lowercase ( self , _a ) -> List[int]: _a : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] _a : Optional[int] = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(_a ) + 
F"""{self.bos_token}Bot:""" ) return self.encode(text=_a )
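A minimal usage sketch for the tokenizer above, shown as a hypothetical example: it assumes the AI-Sweden/gpt-sw3-126m checkpoint from the vocab map above (and its spiece.model) is downloadable and that sentencepiece is installed.

# Hypothetical round-trip through the GPT-SW3 tokenizer via the standard API.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
print(tokenizer.decode(ids[0]))  # decodes back to (a normalization of) the input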
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record a__ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' a__ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' a__ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', 
\'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]: """simple docstring""" return float((preds == labels).mean() ) def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]: """simple docstring""" _a : List[str] = simple_accuracy(__a ,__a ) _a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) ) return { "accuracy": acc, "f1": fa, } def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]: """simple docstring""" _a : Union[str, Any] = {} for id_pred, label in zip(__a ,__a ): _a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" _a : Optional[Any] = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _a : str = [(pred, label)] _a , _a : Any = [], [] for question, preds_labels in question_map.items(): _a , _a : Any = zip(*__a ) _a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' ) fas.append(__a ) _a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) ) ems.append(__a ) _a : List[str] = float(sum(__a ) / len(__a ) ) _a : str = sum(__a ) / len(__a ) _a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __lowercase ( self ) -> Any: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __lowercase ( self , _a , _a ) -> Optional[Any]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_a , _a )} elif self.config_name == "cb": return acc_and_fa(_a , _a , fa_avg='''macro''' ) elif self.config_name == "record": _a : Any = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for 
ans in ref['''answers''']]} for ref in references ] } ] _a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(_a , _a )[0] elif self.config_name == "multirc": return evaluate_multirc(_a , _a ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_a , _a )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
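A quick illustrative sketch of the two scoring helpers above, using the names simple_accuracy and acc_and_fa that the metric class itself calls; inputs are numpy arrays of labels.

import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])

print(simple_accuracy(preds, labels))  # 0.75 (3 of 4 predictions match)
print(acc_and_fa(preds, labels))       # {'accuracy': 0.75, 'f1': ~0.667} with the default binary average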
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split the shard indices into at most max_num_jobs contiguous ranges."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Return one gen_kwargs dict per job, slicing every list value into its shard range."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle all list values of gen_kwargs, using one permutation per list size."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
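A hypothetical walk-through of the helpers above: five list shards spread over two jobs, with non-list values copied to every job.

# Illustrative only: exercises _distribute_shards and _split_gen_kwargs as defined above.
gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt", "e.txt"], "split": "train"}

print(_distribute_shards(num_shards=5, max_num_jobs=2))
# [range(0, 3), range(3, 5)]

for job_kwargs in _split_gen_kwargs(gen_kwargs, max_num_jobs=2):
    print(job_kwargs)
# {'files': ['a.txt', 'b.txt', 'c.txt'], 'split': 'train'}
# {'files': ['d.txt', 'e.txt'], 'split': 'train'}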
import numpy as np def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]: """simple docstring""" assert np.shape(__a )[0] == np.shape(__a )[1] # Ensure proper dimensionality. assert np.shape(__a )[0] == np.shape(__a )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(__a ) == np.iscomplexobj(__a ) _a : List[str] = np.iscomplexobj(__a ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(__a ,input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. _a : List[str] = False _a : List[str] = 0 _a : Tuple = 0 _a : str = 1E12 while not convergence: # Multiple matrix by the vector. _a : str = np.dot(__a ,__a ) # Normalize the resulting output vector. _a : List[Any] = w / np.linalg.norm(__a ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) _a : Dict = vector.conj().T if is_complex else vector.T _a : Tuple = np.dot(__a ,np.dot(__a ,__a ) ) # Check convergence. _a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: _a : Dict = True _a : str = lambda_ if is_complex: _a : Tuple = np.real(lambda_ ) return lambda_, vector def __UpperCAmelCase ( ) -> None: """simple docstring""" _a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) _a : int = np.array([41, 4, 20] ) _a : Optional[Any] = real_input_matrix.astype(np.complexaaa ) _a : int = np.triu(1j * complex_input_matrix ,1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T _a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": _a : Optional[int] = real_input_matrix _a : Union[str, Any] = real_vector elif problem_type == "complex": _a : str = complex_input_matrix _a : str = complex_vector # Our implementation. _a , _a : Optional[Any] = power_iteration(__a ,__a ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). _a , _a : List[str] = np.linalg.eigh(__a ) # Last eigenvalue is the maximum one. _a : Tuple = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. _a : List[Any] = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
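A small sanity check for the routine above, using the name power_iteration that the file's own test references; the 2x2 symmetric matrix below has dominant eigenvalue 3 with eigenvector (1, 1)/sqrt(2).

import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])  # eigenvalues are 1 and 3
vector = np.array([1.0, 0.0])  # any start vector not orthogonal to the dominant eigenvector

eigen_value, eigen_vector = power_iteration(matrix, vector)
print(round(eigen_value, 6))              # ~3.0
print(np.round(np.abs(eigen_vector), 3))  # ~[0.707, 0.707]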
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            # unmatched closer, or the popped opener does not pair with this closer
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
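A few illustrative checks for is_balanced:

assert is_balanced("{[()]}")
assert is_balanced("")           # the empty sequence is balanced
assert not is_balanced("([)]")   # crossed pair
assert not is_balanced("((")     # unclosed opener
print("bracket checks passed")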
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class UpperCAmelCase_ ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase__ : Optional[datasets.Features] = None class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase__ : Any = PandasConfig def __lowercase ( self ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def __lowercase ( self , _a ) -> List[Any]: if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _a : str = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a , (str, list, tuple) ): _a : Dict = data_files if isinstance(_a , _a ): _a : Dict = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _a : int = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] _a : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(_a , _a ): _a : List[str] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _a : Any = [dl_manager.iter_files(_a ) for file in files] splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) ) return splits def __lowercase ( self , _a ) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema ) return pa_table def __lowercase ( self , _a ) -> List[str]: for i, file in enumerate(itertools.chain.from_iterable(_a ) ): with open(_a , '''rb''' ) as f: _a : str = pa.Table.from_pandas(pd.read_pickle(_a ) ) yield i, self._cast_table(_a )
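The builder above appears to back the packaged "pandas" loader in datasets, which reads pickled DataFrames; a minimal sketch, assuming a locally written train.pkl:

import pandas as pd
from datasets import load_dataset

# Write a tiny pickled DataFrame, then load it through the pandas builder.
pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'text': 'hello', 'label': 0}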
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Defer the heavy imports until an attribute is actually accessed
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation: base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Last `digits` digits of the power tower of `base`, `height` levels tall."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
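A quick sanity check, cross-checking _modexpt above against Python's built-in three-argument pow (all exponents here are greater than 1, where the two agree exactly):

for base, exponent, modulo in [(3, 7, 10), (2, 100, 97), (1_777, 1_855, 10**8)]:
    assert _modexpt(base, exponent, modulo) == pow(base, exponent, modulo)
print("modular exponentiation checks passed")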
def and_gate(input_1: int, input_2: int) -> int:
    """Output 1 only if both inputs are 1; 0 otherwise."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging a__ = '''\ ''' a__ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' a__ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _a : List[str] = '''cuda''' else: _a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' _a : Dict = AutoModelForCausalLM.from_pretrained(_a ) _a : List[Any] = model.to(_a ) _a : List[str] = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _a : str = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _a : List[Any] = model.config.max_length - 1 else: _a : List[str] = model.config.max_length _a : Union[str, Any] = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a ) _a : List[Any] = encodings['''input_ids'''] _a : int = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _a : Optional[int] = [] _a : Dict = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): _a : Dict = min(start_index + batch_size , len(_a ) ) _a : Union[str, Any] = encoded_texts[start_index:end_index] _a : int = attn_masks[start_index:end_index] if add_start_token: _a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) _a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _a : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) _a : Dict = encoded_batch with torch.no_grad(): _a : Any = model(_a , attention_mask=_a ).logits _a : List[str] = out_logits[..., :-1, :].contiguous() _a : Union[str, Any] = labels[..., 1:].contiguous() _a : Optional[int] = attn_mask[..., 1:].contiguous() _a : Union[str, Any] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
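For intuition, a hand-rolled single-text version of what the metric above computes: the exponentiated mean token negative log-likelihood. This simplified sketch skips the batching, padding and add_start_token handling, so its number can differ slightly from the metric's output.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

enc = tokenizer("Happy Birthday!", return_tensors="pt")
with torch.no_grad():
    # Passing labels makes the model return the mean cross-entropy over shifted tokens.
    out = model(**enc, labels=enc["input_ids"])
print(torch.exp(out.loss).item())  # perplexity of this single text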
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[Any] = (DDIMParallelScheduler,) UpperCAmelCase__ : int = (("eta", 0.0), ("num_inference_steps", 50)) def __lowercase ( self , **_a ) -> Dict: _a : int = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**_a ) return config def __lowercase ( self , **_a ) -> Union[str, Any]: _a : List[Any] = self.scheduler_classes[0] _a : Dict = self.get_scheduler_config(**_a ) _a : Dict = scheduler_class(**_a ) _a : Any = 1_0, 0.0 _a : Optional[int] = self.dummy_model() _a : Tuple = self.dummy_sample_deter scheduler.set_timesteps(_a ) for t in scheduler.timesteps: _a : Tuple = model(_a , _a ) _a : str = scheduler.step(_a , _a , _a , _a ).prev_sample return sample def __lowercase ( self ) -> Tuple: for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_a ) def __lowercase ( self ) -> Dict: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_a ) _a : int = self.scheduler_classes[0] _a : Any = self.get_scheduler_config(steps_offset=1 ) _a : List[str] = scheduler_class(**_a ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __lowercase ( self ) -> int: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def __lowercase ( self ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_a ) def __lowercase ( self ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def __lowercase ( self ) -> Dict: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_a ) def __lowercase ( self ) -> Dict: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_a ) def __lowercase ( self ) -> Optional[int]: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_a ) def __lowercase ( self ) -> Optional[Any]: self.check_over_configs(thresholding=_a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_a , prediction_type=_a , sample_max_value=_a , ) def __lowercase ( self ) -> Optional[int]: for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=_a ) def __lowercase ( self ) -> int: for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=_a , num_inference_steps=_a ) def __lowercase ( self ) -> Tuple: for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_a , eta=_a ) def __lowercase ( self ) -> Optional[int]: _a : Any = self.scheduler_classes[0] _a : List[str] = self.get_scheduler_config() _a : int = scheduler_class(**_a ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.1_4771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.3_2460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.0_0979 ) ) < 1e-5 assert 
torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5 def __lowercase ( self ) -> List[str]: _a : List[Any] = self.scheduler_classes[0] _a : Union[str, Any] = self.get_scheduler_config() _a : Tuple = scheduler_class(**_a ) _a : Union[str, Any] = 1_0, 0.0 scheduler.set_timesteps(_a ) _a : int = self.dummy_model() _a : int = self.dummy_sample_deter _a : List[str] = self.dummy_sample_deter + 0.1 _a : List[str] = self.dummy_sample_deter - 0.1 _a : int = samplea.shape[0] _a : int = torch.stack([samplea, samplea, samplea] , dim=0 ) _a : Optional[int] = torch.arange(_a )[0:3, None].repeat(1 , _a ) _a : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a : Dict = scheduler.batch_step_no_noise(_a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _a ) _a : List[Any] = torch.sum(torch.abs(_a ) ) _a : str = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def __lowercase ( self ) -> List[str]: _a : Optional[int] = self.full_loop() _a : Optional[Any] = torch.sum(torch.abs(_a ) ) _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.22_3967 ) < 1e-3 def __lowercase ( self ) -> Optional[int]: _a : Optional[int] = self.full_loop(prediction_type='''v_prediction''' ) _a : Union[str, Any] = torch.sum(torch.abs(_a ) ) _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def __lowercase ( self ) -> Dict: # We specify different beta, so that the first alpha is 0.99 _a : Any = self.full_loop(set_alpha_to_one=_a , beta_start=0.01 ) _a : str = torch.sum(torch.abs(_a ) ) _a : Union[str, Any] = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def __lowercase ( self ) -> Union[str, Any]: # We specify different beta, so that the first alpha is 0.99 _a : List[Any] = self.full_loop(set_alpha_to_one=_a , beta_start=0.01 ) _a : str = torch.sum(torch.abs(_a ) ) _a : List[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
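A denoising-loop sketch using the scheduler's step API that full_loop above exercises; the random tensors stand in for a trained UNet and the shapes are purely illustrative.

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample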
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]: """simple docstring""" _a : str = to_pil_image(__a ) _a : Optional[Any] = pil_image.size _a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a ) _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()] _a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] _a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a : int = [] for x, y, w, h in zip(__a ,__a ,__a ,__a ): _a : List[str] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes _a : Dict = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a ,__a ,__a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None: super().__init__(**_a ) _a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Union[str, Any] = get_size_dict(_a ) _a : int = do_resize _a : Optional[int] = size _a : str = resample _a : str = do_rescale _a : Any = rescale_value _a : Optional[Any] = do_normalize _a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD _a : List[Any] = apply_ocr _a : Optional[int] = ocr_lang _a : Tuple = tesseract_config def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray: _a : Any = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[int] = (size['''height'''], size['''width''']) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : Any = get_size_dict(_a ) _a : List[str] = resample if resample is not None else self.resample _a : int = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : int = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr _a : int = ocr_lang if ocr_lang is not None else self.ocr_lang _a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. _a : Any = [to_numpy_array(_a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a : str = [] _a : str = [] for image in images: _a : Union[str, Any] = apply_tesseract(_a , _a , _a ) words_batch.append(_a ) boxes_batch.append(_a ) if do_resize: _a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a ) if apply_ocr: _a : Optional[int] = words_batch _a : List[Any] = boxes_batch return data
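A hedged usage sketch: the processor above appears to correspond to LayoutLMv2ImageProcessor in transformers. It needs pytesseract plus the Tesseract binary installed, document.png is a hypothetical local scan, and the "words"/"boxes" output keys are an assumption inferred from the apply_ocr branch above.

from PIL import Image
from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")

encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)        # torch.Size([1, 3, 224, 224])
print(encoding["words"], encoding["boxes"])  # OCR'd words and normalized bounding boxes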
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ = logging.get_logger(__name__) a__ = '''▁''' a__ = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } a__ = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } a__ = { '''facebook/s2t-small-librispeech-asr''': 1024, } a__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] a__ = {'''mustc''': MUSTC_LANGS} class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Optional[Any] = MAX_MODEL_INPUT_SIZES UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"] UpperCAmelCase__ : List[int] = [] def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="<pad>" , _a="<unk>" , _a=False , _a=False , _a=None , _a=None , _a = None , **_a , ) -> None: _a : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , do_upper_case=_a , do_lower_case=_a , tgt_lang=_a , lang_codes=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) _a : Any = do_upper_case _a : Tuple = do_lower_case _a : Union[str, Any] = load_json(_a ) _a : List[Any] = {v: k for k, v in self.encoder.items()} _a : List[str] = spm_file _a : Union[str, Any] = load_spm(_a , self.sp_model_kwargs ) if lang_codes is not None: _a : Optional[Any] = lang_codes _a : str = LANGUAGES[lang_codes] _a : str = [F"""<lang:{lang}>""" for lang in self.langs] _a : str = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs} _a : Tuple = self.lang_tokens _a : Any = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: _a : Union[str, Any] = {} @property def __lowercase ( self ) -> int: return len(self.encoder ) @property def __lowercase ( self ) -> str: return self._tgt_lang @tgt_lang.setter def __lowercase ( self , _a ) -> None: _a : Tuple = new_tgt_lang self.set_tgt_lang_special_tokens(_a ) def __lowercase ( self , _a ) -> None: _a : Tuple = self.lang_code_to_id[tgt_lang] _a : str = [lang_code_id] def __lowercase ( self , _a ) -> List[str]: return self.sp_model.encode(_a , out_type=_a ) def __lowercase ( self , _a ) -> int: return self.encoder.get(_a , self.encoder[self.unk_token] ) def __lowercase ( self , _a ) -> str: return self.decoder.get(_a , self.unk_token ) def __lowercase ( self , _a ) -> str: _a : Any = [] _a : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: _a : Tuple = self.sp_model.decode(_a ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " _a : Optional[int] = [] else: current_sub_tokens.append(_a ) _a : Union[str, Any] = self.sp_model.decode(_a ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def __lowercase ( self , _a , _a=None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
[self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) _a : Any = [1] * len(self.prefix_tokens ) _a : Any = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_a )) + suffix_ones return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones def __lowercase ( self ) -> Dict: _a : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: _a : str = self.__dict__.copy() _a : List[Any] = None return state def __setstate__( self , _a ) -> None: _a : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _a : Optional[int] = {} _a : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def __lowercase ( self , _a , _a = None ) -> Tuple[str]: _a : Any = Path(_a ) assert save_dir.is_dir(), F"""{save_directory} should be a directory""" _a : Any = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) _a : Tuple = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _a ) if os.path.abspath(self.spm_file ) != os.path.abspath(_a ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _a ) elif not os.path.isfile(self.spm_file ): with open(_a , '''wb''' ) as fi: _a : Tuple = self.sp_model.serialized_model_proto() fi.write(_a ) return (str(_a ), str(_a )) def __UpperCAmelCase ( __a : str ,__a : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" _a : List[Any] = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __UpperCAmelCase ( __a : str ) -> Union[Dict, List]: """simple docstring""" with open(__a ,'''r''' ) as f: return json.load(__a ) def __UpperCAmelCase ( __a : Dict ,__a : str ) -> None: """simple docstring""" with open(__a ,'''w''' ) as f: json.dump(__a ,__a ,indent=2 )
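A small round-trip sketch, assuming the facebook/s2t-small-librispeech-asr files referenced in the vocab map above are downloadable; in transformers this tokenizer is the text side of Speech2Text, exposed as Speech2TextTokenizer.

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
# Round-trips the text (possibly upper-cased, depending on do_upper_case)
print(tokenizer.decode(ids, skip_special_tokens=True))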
367
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # DialoGPT checkpoints store the LM head under "lm_head.decoder.weight";
    # rename it to the key HuggingFace GPT-2 models expect, then save.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
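# Usage sketch (editor's addition; the script filename is an assumption, not
# given by the source):
#
#     python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints
#
# This expects small_ft.pkl, medium_ft.pkl and large_ft.pkl inside that
# directory and writes renamed weight files to ./DialoGPT-small,
# ./DialoGPT-medium and ./DialoGPT-large.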
15
0
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants a__ = Mapping[str, np.ndarray] a__ = Mapping[str, Any] # Is a nested dict. a__ = 0.01 @dataclasses.dataclass(frozen=__lowercase ) class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files UpperCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) UpperCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent UpperCAmelCase__ : Optional[Sequence[int]] = None def __UpperCAmelCase ( __a : str ) -> Protein: """simple docstring""" _a : int = R'''(\[[A-Z]+\]\n)''' _a : List[str] = [tag.strip() for tag in re.split(__a ,__a ) if len(__a ) > 0] _a : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] ,[l.split('''\n''' ) for l in tags[1::2]] ) _a : List[str] = ["N", "CA", "C"] _a : Tuple = None _a : int = None _a : int = None for g in groups: if "[PRIMARY]" == g[0]: _a : Any = g[1][0].strip() for i in range(len(__a ) ): if seq[i] not in residue_constants.restypes: _a : Any = '''X''' # FIXME: strings are immutable _a : Dict = np.array( [residue_constants.restype_order.get(__a ,residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: _a : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(__a ,g[1][axis].split() ) ) ) _a : Union[str, Any] = np.array(__a ) _a : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__a ): _a : Optional[int] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: _a : Any = np.array(list(map({'''-''': 0, '''+''': 1}.get ,g[1][0].strip() ) ) ) _a : Tuple = np.zeros( ( len(__a ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__a ): _a : Union[str, Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__a ,atom_mask=__a ,aatype=__a ,residue_index=np.arange(len(__a ) ) ,b_factors=__a ,) def __UpperCAmelCase ( __a : Protein ,__a : int = 0 ) -> List[str]: """simple docstring""" _a : List[str] = [] _a : str = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) _a : Optional[int] = prot.parents _a : Tuple = prot.parents_chain_index if parents is not None and parents_chain_index is not None: _a : int = [p for i, p in zip(__a ,__a ) if i == chain_id] if parents is None or len(__a ) == 0: _a : List[Any] = ['''N/A'''] pdb_headers.append(F"""PARENT {' '.join(__a )}""" ) return pdb_headers def __UpperCAmelCase 
( __a : Protein ,__a : str ) -> str: """simple docstring""" _a : List[str] = [] _a : Union[str, Any] = pdb_str.split('''\n''' ) _a : Any = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) _a : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: _a : Tuple = [] if prot.parents_chain_index is not None: _a : Dict[str, List[str]] = {} for p, i in zip(prot.parents ,prot.parents_chain_index ): parent_dict.setdefault(str(__a ) ,[] ) parent_dict[str(__a )].append(__a ) _a : List[Any] = max([int(__a ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): _a : int = parent_dict.get(str(__a ) ,['''N/A'''] ) parents_per_chain.append(__a ) else: parents_per_chain.append(list(prot.parents ) ) else: _a : Tuple = [['''N/A''']] def make_parent_line(__a : Sequence[str] ) -> str: return F"""PARENT {' '.join(__a )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) _a : List[Any] = 0 for i, l in enumerate(__a ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__a ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__a ): _a : str = parents_per_chain[chain_counter] else: _a : Optional[int] = ['''N/A'''] out_pdb_lines.append(make_parent_line(__a ) ) return "\n".join(__a ) def __UpperCAmelCase ( __a : Protein ) -> str: """simple docstring""" _a : str = residue_constants.restypes + ['''X'''] def res_atoa(__a : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] ,'''UNK''' ) _a : Dict = residue_constants.atom_types _a : List[str] = [] _a : List[Any] = prot.atom_mask _a : Union[str, Any] = prot.aatype _a : Optional[Any] = prot.atom_positions _a : Dict = prot.residue_index.astype(np.intaa ) _a : Tuple = prot.b_factors _a : str = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) _a : List[Any] = get_pdb_headers(__a ) if len(__a ) > 0: pdb_lines.extend(__a ) _a : List[str] = aatype.shape[0] _a : Optional[Any] = 1 _a : List[str] = 0 _a : Dict = string.ascii_uppercase _a : Optional[Any] = None # Add all atom sites. for i in range(__a ): _a : Any = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__a ,atom_positions[i] ,atom_mask[i] ,b_factors[i] ): if mask < 0.5: continue _a : Optional[Any] = '''ATOM''' _a : List[Any] = atom_name if len(__a ) == 4 else F""" {atom_name}""" _a : Union[str, Any] = '''''' _a : Dict = '''''' _a : List[Any] = 1.00 _a : int = atom_name[0] # Protein supports only C, N, O, S, this works. _a : int = '''''' _a : Union[str, Any] = '''A''' if chain_index is not None: _a : int = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! _a : Tuple = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(__a ) atom_index += 1 _a : List[Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: _a : Optional[int] = True _a : Union[str, Any] = chain_index[i + 1] if should_terminate: # Close the chain. _a : str = '''TER''' _a : Any = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(__a ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. 
pdb_lines.extend(get_pdb_headers(__a ,__a ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(__a ) def __UpperCAmelCase ( __a : Protein ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def __UpperCAmelCase ( __a : FeatureDict ,__a : ModelOutput ,__a : Optional[np.ndarray] = None ,__a : Optional[np.ndarray] = None ,__a : Optional[str] = None ,__a : Optional[Sequence[str]] = None ,__a : Optional[Sequence[int]] = None ,) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] ,atom_positions=result['''final_atom_positions'''] ,atom_mask=result['''final_atom_mask'''] ,residue_index=features['''residue_index'''] + 1 ,b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) ,chain_index=__a ,remark=__a ,parents=__a ,parents_chain_index=__a ,)
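# Editor's sketch: the ATOM records built in to_pdb above are fixed-width, so
# every literal space in the f-string matters. A standalone reproduction of the
# column layout (field widths follow the PDB spec; the atom values are made up):
if __name__ == "__main__":
    record = (
        f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
        f"{'ALA':>3} {'A':>1}"
        f"{1:>4}{'':>1}   "
        f"{10.000:>8.3f}{5.250:>8.3f}{-3.125:>8.3f}"
        f"{1.00:>6.2f}{25.50:>6.2f}          "
        f"{'C':>2}{'':>2}"
    )
    assert len(record) == 80  # a PDB record is padded to exactly 80 columns
    print(record)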
368
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
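# Usage sketch (editor's addition; "gpt2" is an illustrative checkpoint):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     out = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(out[0]["generated_text"])
#
# The preprocess / _forward / postprocess methods above run in that order for
# each call; passing return_full_text=False yields only the newly generated
# text instead of prompt + continuation.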
15
0
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle outline, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
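# Editor's note: each recursion level replaces a triangle with three half-scale
# copies at its corners, so a run at depth d traces 3**d smallest triangles;
# get_mid((0, 0), (2, 4)) returns (1.0, 2.0), the plain midpoint.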
369
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
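# Usage sketch (editor's addition): this module implements the `accelerate test`
# CLI subcommand, e.g.
#
#     accelerate test --config_file path/to/default_config.yaml
#
# which runs test_script.py through accelerate-launch with the given config and
# prints the success message above when the distributed setup works.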
15
0
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all integers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
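# Worked check (editor's addition): greatest_common_divisor(12, 18) == 6,
# lcm(4, 6) == 12, and solution(10) == 2520 -- the smallest positive number
# evenly divisible by every integer from 1 through 10.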
370
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = tempfile.mkdtemp() # fmt: off _a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _a : str = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __lowercase ( self , **_a ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> str: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self ) -> str: _a : List[str] = self.get_tokenizer() _a : Tuple = self.get_image_processor() _a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Dict: _a : List[str] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Any: _a : Dict = self.get_image_processor() _a : str = 
self.get_tokenizer() _a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[str] = self.prepare_image_inputs() _a : List[Any] = image_processor(_a , return_tensors='''np''' ) _a : Dict = processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.get_image_processor() _a : Dict = self.get_tokenizer() _a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Tuple = '''lower newer''' _a : int = processor(text=_a ) _a : str = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ) -> List[Any]: _a : Any = self.get_image_processor() _a : str = self.get_tokenizer() _a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[Any] = '''lower newer''' _a : Union[str, Any] = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : int = processor.batch_decode(_a ) _a : int = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __lowercase ( self ) -> List[Any]: _a : Tuple = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Optional[int] = '''lower newer''' _a : Dict = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
15
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a__ = logging.get_logger(__name__) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PIL.Image.BICUBIC , _a = True , _a = None , _a = 1 / 2_5_5 , _a = True , _a = True , _a = None , _a = None , **_a , ) -> None: super().__init__(**_a ) _a : Dict = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6} _a : int = get_size_dict(_a ) _a : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Dict = get_size_dict(_a , param_name='''crop_size''' ) _a : Union[str, Any] = do_resize _a : Any = size _a : List[Any] = resample _a : List[Any] = do_center_crop _a : Union[str, Any] = crop_size _a : str = do_rescale _a : Union[str, Any] = rescale_factor _a : int = do_normalize _a : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowercase ( self , _a , _a , _a = PIL.Image.BICUBIC , _a = None , **_a , ) -> np.ndarray: _a : List[Any] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( _a , size=(size['''height'''], size['''width''']) , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: _a : Union[str, Any] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> List[Any]: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : int = do_resize if do_resize is not None else self.do_resize _a : int = resample if resample is not None else self.resample _a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _a : Any = do_rescale if do_rescale is not None else self.do_rescale _a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _a : Optional[int] = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = size if size is not None else self.size _a : str = get_size_dict(_a ) _a : List[Any] = crop_size if crop_size is not None else self.crop_size _a : int = get_size_dict(_a , param_name='''crop_size''' ) _a : Union[str, Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _a : Dict = [to_numpy_array(_a ) for image in images] if do_resize: _a : Optional[int] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: _a : Optional[Any] = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : Dict = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : Tuple = [to_channel_dimension_format(_a , _a ) for image in images] _a : Dict = {'''pixel_values''': images} return BatchFeature(data=_a , tensor_type=_a )
371
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]: """simple docstring""" for attribute in key.split('''.''' ): _a : Optional[Any] = getattr(__a ,__a ) if weight_type is not None: _a : Dict = getattr(__a ,__a ).shape else: _a : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : List[Any] = value elif weight_type == "weight_g": _a : Any = value elif weight_type == "weight_v": _a : Union[str, Any] = value elif weight_type == "bias": _a : Optional[int] = value else: _a : List[Any] = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int: """simple docstring""" _a : Union[str, Any] = [] _a : Union[str, Any] = fairseq_model.state_dict() _a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _a : int = False if "conv_layers" in name: load_conv_layer( __a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,) _a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): _a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): _a : Any = True if "*" in mapped_key: _a : Optional[int] = name.split(__a )[0].split('''.''' )[-2] _a : Any = mapped_key.replace('''*''' ,__a ) if "weight_g" in name: _a : List[Any] = '''weight_g''' elif "weight_v" in name: _a : List[str] = '''weight_v''' elif "weight" in name: _a : Any = '''weight''' elif "bias" in name: _a : str = '''bias''' else: _a : Any = None set_recursively(__a ,__a ,__a ,__a ,__a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" _a : int = full_name.split('''conv_layers.''' )[-1] _a : Any = name.split('''.''' ) _a : List[Any] = int(items[0] ) _a : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _a : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _a : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _a : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _a : Any = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]: """simple docstring""" if config_path is not None: _a : Tuple = HubertConfig.from_pretrained(__a ) else: _a : Any = HubertConfig() if is_finetuned: if dict_path: _a : Tuple = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a : Any = target_dict.pad_index _a : Tuple = target_dict.bos_index _a : Optional[int] = target_dict.eos_index _a : Optional[Any] = len(target_dict.symbols ) _a : Tuple = os.path.join(__a ,'''vocab.json''' ) if not os.path.isdir(__a ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) ) return os.makedirs(__a ,exist_ok=__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices ,__a ) _a : Tuple = WavaVecaCTCTokenizer( __a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,) _a : Tuple = True if config.feat_extract_norm == '''layer''' else False _a : List[Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,) _a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a ) processor.save_pretrained(__a ) _a : Tuple = HubertForCTC(__a ) else: _a : Tuple = HubertModel(__a ) if is_finetuned: _a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _a : Any = model[0].eval() recursively_load_weights(__a ,__a ,__a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
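# Usage sketch (editor's addition; the script filename is an assumption, the
# flags come from the argparse block above):
#
#     python convert_hubert_checkpoint.py \
#         --checkpoint_path ./hubert_base_ls960.pt \
#         --pytorch_dump_folder_path ./hubert-base \
#         --not_finetuned
#
# Omit --not_finetuned and pass --dict_path to convert a fine-tuned CTC model
# together with its tokenizer and feature extractor.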
15
0
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    # Sieve of Eratosthenes over the odd numbers (plus 2).
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi(n) = n * prod_{p | n} (1 - 1/p) over the prime divisors p of n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
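# Worked check (editor's addition): for limit = 8 the totient values
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4 sum to 21, so solution(8) == 21 -- the number
# of reduced proper fractions with denominator <= 8 (Project Euler 72's example).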
350
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
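# Usage sketch (editor's addition; the checkpoint name is an illustrative example):
#
#     from transformers import ViltProcessor
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     encoding = processor(image, "How many cats are there?", return_tensors="pt")
#
# encoding then carries input_ids / attention_mask / token_type_ids from the
# tokenizer plus pixel_values / pixel_mask from the image processor, merged
# into one BatchEncoding exactly as __call__ does above.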
15
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], 
_import_structure, module_spec=__spec__)
351
from math import ceil


def solution(n: int = 1_001) -> int:
    """Return the sum of the numbers on the diagonals of an n-by-n number spiral (n odd)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
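# Worked check (editor's addition): for the 5x5 spiral the diagonal numbers are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and solution(5) == 101.
# Ring i contributes four corners summing to 4*(2i + 1)**2 - 12i, written above
# as 4 * odd**2 - 6 * even with odd = 2i + 1 and even = 2i.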
15
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a__ = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' a__ = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' a__ = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' a__ = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' a__ = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Optional[Any]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def __lowercase ( self , _a , _a , _a=[1, 1_0, 1_0_0] , _a=4 , _a=3.0 ) -> Tuple: if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=_a ) as executor: _a : Dict = [] _a : str = Counter() _a : Any = 0 _a : List[str] = defaultdict(_a ) for task_id, (candidates, test_case) in enumerate(zip(_a , _a ) ): for candidate in candidates: _a : Any = candidate + '''\n''' + test_case _a : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id]) _a : Dict = executor.submit(_a , *_a ) futures.append(_a ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_a ): _a : Any = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) _a : Dict = [], [] for result in results.values(): result.sort() _a : str = [r[1]['''passed'''] for r in result] total.append(len(_a ) ) correct.append(sum(_a ) ) _a : str = np.array(_a ) _a : int = np.array(_a ) _a : List[str] = k _a : Dict = {F"""pass@{k}""": estimate_pass_at_k(_a , _a , _a ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __UpperCAmelCase ( __a : Tuple ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" def estimator(__a : int ,__a : int ,__a : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / 
np.arange(n - c + 1 ,n + 1 ) ) if isinstance(__a ,__a ): _a : List[str] = itertools.repeat(__a ,len(__a ) ) else: assert len(__a ) == len(__a ) _a : Optional[Any] = iter(__a ) return np.array([estimator(int(__a ) ,int(__a ) ,__a ) for n, c in zip(__a ,__a )] )
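# Worked check (editor's addition): the estimator above computes the unbiased
# pass@k from the Codex paper, 1 - C(n - c, k) / C(n, k) for n samples with c
# correct, via the numerically stable product form. With n = 2 candidates,
# c = 1 correct and k = 1: 1 - C(1, 1) / C(2, 1) = 1 - 1/2 = 0.5, matching the
# {'pass@1': 0.5} in the docstring example; for k = 2, n - c < k, so every draw
# of 2 contains the correct candidate and pass@2 = 1.0.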
352
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]: """simple docstring""" _a : str = to_pil_image(__a ) _a , _a : Optional[Any] = pil_image.size _a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a ) _a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()] _a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] _a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a : int = [] for x, y, w, h in zip(__a ,__a ,__a ,__a ): _a : List[str] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes _a : Dict = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a ,__a ,__a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None: super().__init__(**_a ) _a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Union[str, Any] = get_size_dict(_a ) _a : int = do_resize _a : Optional[int] = size _a : str = resample _a : str = do_rescale _a : Any = rescale_value _a : Optional[Any] = do_normalize _a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD _a : List[Any] = apply_ocr _a : Optional[int] = ocr_lang _a : Tuple = tesseract_config def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray: _a : Any = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[int] = (size['''height'''], size['''width''']) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : Any = get_size_dict(_a ) _a : List[str] = resample if resample is not None else self.resample _a : int = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : int = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr _a : int = ocr_lang if ocr_lang is not None else self.ocr_lang _a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. _a : Any = [to_numpy_array(_a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a : str = [] _a : str = [] for image in images: _a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a ) words_batch.append(_a ) boxes_batch.append(_a ) if do_resize: _a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a ) if apply_ocr: _a : Optional[int] = words_batch _a : List[Any] = boxes_batch return data
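# A minimal, hypothetical usage sketch of an OCR-enabled image processor like the
# one above. The concrete class name (LayoutLMv2ImageProcessor) and the input file
# are assumptions for illustration; pytesseract and the Tesseract binary must be
# installed for apply_ocr=True to work.
from PIL import Image
from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor(apply_ocr=True)
image = Image.open("document.png").convert("RGB")  # placeholder input

encoding = processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, 224, 224) after the default resize
print(encoding["words"][0][:5])        # OCR words for the first image
print(encoding["boxes"][0][:5])        # boxes normalized to the 0-1000 grid above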
15
0
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any]=None ,__a : Dict=None ) -> List[Any]: """simple docstring""" return field(default_factory=lambda: default ,metadata=__a ) @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) UpperCAmelCase__ : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) UpperCAmelCase__ : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Use FP16 to accelerate inference."} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Benchmark training of model"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Verbose memory tracing"} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Trace memory line by line"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save result to a CSV file"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Save all print statements in a log file"} ) UpperCAmelCase__ : bool = field(default=__lowercase , metadata={"help": "Whether to print environment information"} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) UpperCAmelCase__ : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) UpperCAmelCase__ : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) UpperCAmelCase__ : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) UpperCAmelCase__ : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) UpperCAmelCase__ : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) UpperCAmelCase__ : str = field( default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) UpperCAmelCase__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __lowercase ( self ) -> Union[str, Any]: warnings.warn( F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , _a , ) def __lowercase ( self ) -> List[Any]: return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __lowercase ( self ) -> List[str]: if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def __lowercase ( self ) -> List[str]: if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
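# A minimal sketch of the list_field helper pattern used above: dataclasses
# reject mutable defaults, so a default_factory closure is substituted. Names
# below are illustrative, not the benchmark module's public API.
from dataclasses import dataclass, field
from typing import List


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class Args:
    batch_sizes: List[int] = list_field(default=[8], metadata={"help": "batch sizes"})


a, b = Args(), Args()
print(a.batch_sizes is b.batch_sizes)  # True: the closure returns the same list object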
353
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def __UpperCAmelCase ( ) -> Optional[Any]:
    """simple docstring"""
    _a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
    _a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )

    # Register commands
    get_config_parser(subparsers=__a )
    env_command_parser(subparsers=__a )
    launch_command_parser(subparsers=__a )
    tpu_command_parser(subparsers=__a )
    test_command_parser(subparsers=__a )

    # Let's go
    _a : Dict = parser.parse_args()

    if not hasattr(__a ,'''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(__a )


if __name__ == "__main__":
    main()
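# A minimal sketch of the subparser registration pattern the CLI above relies
# on; the 'greet' command and all names here are illustrative.
from argparse import ArgumentParser

parser = ArgumentParser("demo", usage="demo <command> [<args>]", allow_abbrev=False)
subparsers = parser.add_subparsers(help="demo command helpers")

greet = subparsers.add_parser("greet")
greet.add_argument("name")
greet.set_defaults(func=lambda args: print(f"hello {args.name}"))  # dispatch target

args = parser.parse_args(["greet", "world"])
args.func(args)  # prints: hello world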
15
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
354
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a__ = random.Random() def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any: """simple docstring""" if rng is None: _a : Dict = global_rng _a : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]: _a : Optional[Any] = parent _a : str = batch_size _a : List[str] = min_seq_length _a : str = max_seq_length _a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _a : List[Any] = spectrogram_length _a : List[str] = feature_size _a : List[Any] = num_audio_channels _a : Tuple = hop_length _a : Optional[int] = chunk_length _a : int = sampling_rate def __lowercase ( self ) -> Union[str, Any]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowercase ( self , _a=False , _a=False ) -> List[Any]: def _flatten(_a ): return list(itertools.chain(*_a ) ) if equal_length: _a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _a : List[Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _a : str = [np.asarray(_a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = TvltFeatureExtractor def __lowercase ( self ) -> Dict: _a : List[str] = TvltFeatureExtractionTester(self ) def __lowercase ( self ) -> Any: _a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_a , '''spectrogram_length''' ) ) self.assertTrue(hasattr(_a , '''feature_size''' ) ) self.assertTrue(hasattr(_a , '''num_audio_channels''' ) ) self.assertTrue(hasattr(_a , '''hop_length''' ) ) self.assertTrue(hasattr(_a , '''chunk_length''' ) ) self.assertTrue(hasattr(_a , '''sampling_rate''' ) ) def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : int = feat_extract_first.save_pretrained(_a )[0] check_json_file_has_correct_format(_a ) _a : Dict = self.feature_extraction_class.from_pretrained(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Union[str, Any] = feat_extract_second.to_dict() _a : Any = dict_first.pop('''mel_filters''' ) _a : int = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) 
-> Optional[int]: _a : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Optional[int] = os.path.join(_a , '''feat_extract.json''' ) feat_extract_first.to_json_file(_a ) _a : List[str] = self.feature_extraction_class.from_json_file(_a ) _a : List[Any] = feat_extract_first.to_dict() _a : Dict = feat_extract_second.to_dict() _a : str = dict_first.pop('''mel_filters''' ) _a : str = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_a , _a ) ) self.assertEqual(_a , _a ) def __lowercase ( self ) -> Union[str, Any]: # Initialize feature_extractor _a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] _a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs] # Test not batched input _a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _a : Union[str, Any] = feature_extractor( _a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. _a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] _a : int = np.asarray(_a ) _a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowercase ( self , _a ) -> Optional[Any]: _a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __lowercase ( self ) -> int: _a : Union[str, Any] = self._load_datasamples(1 ) _a : int = TvltFeatureExtractor() _a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) _a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
15
0
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a__ = logging.get_logger(__name__) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Tuple = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 2_5_5 , _a = True , _a = IMAGENET_DEFAULT_MEAN , _a = IMAGENET_DEFAULT_STD , **_a , ) -> None: super().__init__(**_a ) _a : int = size if size is not None else {'''shortest_edge''': 2_2_4} _a : List[str] = get_size_dict(_a , default_to_square=_a ) _a : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Optional[int] = get_size_dict(_a , param_name='''crop_size''' ) _a : Optional[int] = do_resize _a : str = size _a : List[str] = resample _a : str = do_center_crop _a : Optional[Any] = crop_size _a : Union[str, Any] = do_rescale _a : str = rescale_factor _a : List[Any] = do_normalize _a : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowercase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: _a : Optional[int] = get_size_dict(_a , default_to_square=_a ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _a : Optional[int] = int((2_5_6 / 2_2_4) * size['''shortest_edge'''] ) _a : Any = get_resize_output_image_size(_a , size=_a , default_to_square=_a ) _a : Dict = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( _a , size=(size_dict['''height'''], size_dict['''width''']) , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: _a : int = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> BatchFeature: _a : int = do_resize if do_resize is not None else self.do_resize _a : Optional[Any] = resample if resample is not None else self.resample _a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _a : Any = do_rescale if do_rescale is not None else self.do_rescale _a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : Dict = do_normalize if do_normalize is not None else self.do_normalize _a : Union[str, Any] = image_mean if image_mean is not None else self.image_mean _a : Union[str, Any] = image_std if image_std is not None else self.image_std _a : Union[str, Any] = size if size is not None else self.size _a : str = get_size_dict(_a , default_to_square=_a ) _a : Optional[int] = crop_size if crop_size is not None else self.crop_size _a : Union[str, Any] = get_size_dict(_a , param_name='''crop_size''' ) _a : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _a : Tuple = [to_numpy_array(_a ) for image in images] if do_resize: _a : int = [self.resize(_a , _a , _a ) for image in images] if do_center_crop: _a : Any = [self.center_crop(_a , _a ) for image in images] if do_rescale: _a : Any = [self.rescale(_a , _a ) for image in images] if do_normalize: _a : Optional[int] = [self.normalize(_a , _a , _a ) for image in images] _a : Optional[int] = [to_channel_dimension_format(_a , _a ) for image in images] _a : Dict = {'''pixel_values''': images} return BatchFeature(data=_a , tensor_type=_a )
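# A back-of-envelope sketch of the shortest-edge resize performed above: the
# short side is scaled to int((256 / 224) * size["shortest_edge"]) while
# preserving aspect ratio, before the 224x224 center crop. This pure-Python
# helper is an illustration, not the library's get_resize_output_image_size.
def shortest_edge_resize_size(height, width, shortest_edge=224):
    resize_to = int((256 / 224) * shortest_edge)  # 256 for the default 224
    scale = resize_to / min(height, width)
    if height <= width:
        return resize_to, int(width * scale)
    return int(height * scale), resize_to


print(shortest_edge_resize_size(480, 640))  # (256, 341): short side pinned to 256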
355
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch a__ = logging.get_logger(__name__) @add_end_docstrings( __lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self , _a ) -> np.ndarray: if self.framework == "tf": _a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __lowercase ( self , _a ) -> np.ndarray: _a : int = self.get_masked_index(_a ) _a : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def __lowercase ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_a ) def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]: if return_tensors is None: _a : Union[str, Any] = self.framework _a : str = self.tokenizer(_a , return_tensors=_a ) self.ensure_exactly_one_mask_token(_a ) return model_inputs def __lowercase ( self , _a ) -> Optional[Any]: _a : List[str] = self.model(**_a ) _a : Any = model_inputs['''input_ids'''] return model_outputs def __lowercase ( self , _a , _a=5 , _a=None ) -> str: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _a : List[Any] = target_ids.shape[0] _a : Any = model_outputs['''input_ids'''][0] _a : List[str] = model_outputs['''logits'''] if self.framework == "tf": _a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _a : List[str] = outputs.numpy() _a : Dict = outputs[0, masked_index, :] _a : str = stable_softmax(_a , axis=-1 ) if target_ids is not None: _a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) ) _a : Union[str, Any] = tf.expand_dims(_a , 0 ) _a : Optional[int] = tf.math.top_k(_a , k=_a ) _a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: _a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _a : List[str] = outputs[0, masked_index, :] _a : List[Any] = logits.softmax(dim=-1 ) if target_ids is not None: _a : List[Any] = probs[..., target_ids] _a , _a : Optional[Any] = probs.topk(_a ) _a : Dict = [] _a : List[Any] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _a : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this 
array in place _a : Optional[int] = input_ids.numpy().copy() if target_ids is not None: _a : Tuple = target_ids[p].tolist() _a : List[str] = p # Filter padding out: _a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a ) _a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(_a ) result.append(_a ) if single_mask: return result[0] return result def __lowercase ( self , _a , _a=None ) -> Dict: if isinstance(_a , _a ): _a : Tuple = [targets] try: _a : int = self.tokenizer.get_vocab() except Exception: _a : Any = {} _a : List[Any] = [] for target in targets: _a : List[Any] = vocab.get(_a , _a ) if id_ is None: _a : Tuple = self.tokenizer( _a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids'''] if len(_a ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ '''We cannot replace it with anything meaningful, ignoring it''' ) continue _a : Tuple = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) _a : List[str] = list(set(_a ) ) if len(_a ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _a : int = np.array(_a ) return target_ids def __lowercase ( self , _a=None , _a=None ) -> Tuple: _a : str = {} if targets is not None: _a : List[Any] = self.get_target_ids(_a , _a ) _a : Optional[Any] = target_ids if top_k is not None: _a : Union[str, Any] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__( self , _a , *_a , **_a ) -> int: _a : Optional[Any] = super().__call__(_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs
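# Hypothetical end-to-end usage of the fill-mask pipeline implemented above;
# the checkpoint name is illustrative.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")

# top_k and targets map onto the preprocess/postprocess parameters defined above
for pred in fill_mask("The capital of France is <mask>.", top_k=3):
    print(f"{pred['token_str']!r}: {pred['score']:.3f}")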
15
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a__ = logging.get_logger(__name__) a__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]: """simple docstring""" for attribute in key.split('''.''' ): _a : Optional[Any] = getattr(__a ,__a ) if weight_type is not None: _a : Dict = getattr(__a ,__a ).shape else: _a : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : List[Any] = value elif weight_type == "weight_g": _a : Any = value elif weight_type == "weight_v": _a : Union[str, Any] = value elif weight_type == "bias": _a : Optional[int] = value else: _a : List[Any] = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int: """simple docstring""" _a : Union[str, Any] = [] _a : Union[str, Any] = fairseq_model.state_dict() _a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _a : int = False if "conv_layers" in name: load_conv_layer( __a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,) _a : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): _a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): _a : Any = True if "*" in mapped_key: _a : Optional[int] = name.split(__a )[0].split('''.''' )[-2] _a : Any = mapped_key.replace('''*''' ,__a ) if "weight_g" in name: _a : List[Any] = '''weight_g''' elif "weight_v" in name: _a : List[str] = '''weight_v''' elif "weight" in name: _a : Any = '''weight''' elif "bias" in name: _a : str = '''bias''' else: _a : Any = None set_recursively(__a ,__a ,__a ,__a ,__a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple: """simple docstring""" _a : int = full_name.split('''conv_layers.''' )[-1] _a : Any = name.split('''.''' ) _a : List[Any] = int(items[0] ) _a : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _a : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _a : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _a : int = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _a : Any = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) @torch.no_grad() def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]: """simple docstring""" if config_path is not None: _a : Tuple = HubertConfig.from_pretrained(__a ) else: _a : Any = HubertConfig() if is_finetuned: if dict_path: _a : Tuple = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a : Any = target_dict.pad_index _a : Tuple = target_dict.bos_index _a : Optional[int] = target_dict.eos_index _a : Optional[Any] = len(target_dict.symbols ) _a : Tuple = os.path.join(__a ,'''vocab.json''' ) if not os.path.isdir(__a ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) ) return os.makedirs(__a ,exist_ok=__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices ,__a ) _a : Tuple = WavaVecaCTCTokenizer( __a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,) _a : Tuple = True if config.feat_extract_norm == '''layer''' else False _a : List[Any] = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,) _a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a ) processor.save_pretrained(__a ) _a : Tuple = HubertForCTC(__a ) else: _a : Tuple = HubertModel(__a ) if is_finetuned: _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _a : Any = model[0].eval() recursively_load_weights(__a ,__a ,__a ) hf_wavavec.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
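# A simplified sketch of the key-renaming loop in recursively_load_weights
# above: fairseq parameter names are rewritten through a prefix map, with '*'
# standing in for the layer index. The MAPPING entry here is one example only.
MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def rename_key(name):
    for key, mapped in MAPPING.items():
        if key in name:
            if "*" in mapped:
                layer = name.split(key)[0].split(".")[-2]  # e.g. '3' from 'encoder.layers.3.'
                mapped = mapped.replace("*", layer)
            return mapped
    return name


print(rename_key("encoder.layers.3.self_attn.k_proj.weight"))
# encoder.layers.3.attention.k_proj  (the weight/bias suffix is handled separately)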
356
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow a__ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) a__ = logging.getLogger() def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _a : Any = argparse.ArgumentParser() parser.add_argument('''-f''' ) _a : Dict = parser.parse_args() return args.f def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any: """simple docstring""" _a : Any = os.path.join(__a ,F"""{split}_results.json""" ) if os.path.exists(__a ): with open(__a ,'''r''' ) as f: return json.load(__a ) raise ValueError(F"""can't find {path}""" ) a__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> str: _a : Any = self.get_auto_remove_tmp_dir() _a : Optional[Any] = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_glue.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def __lowercase ( self ) -> Dict: _a : Tuple = self.get_auto_remove_tmp_dir() _a : Tuple = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_clm_flax.main() _a : List[str] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 1_0_0 ) @slow def __lowercase ( self ) -> Optional[int]: _a : str = self.get_auto_remove_tmp_dir() _a : Optional[int] = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(_a , '''argv''' , _a ): run_summarization_flax.main() _a : Optional[int] = get_results(_a , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def __lowercase ( self ) -> Tuple: _a : List[str] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_mlm.py --model_name_or_path 
distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(_a , '''argv''' , _a ): run_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertLess(result['''eval_perplexity'''] , 4_2 ) @slow def __lowercase ( self ) -> Dict: _a : Optional[Any] = self.get_auto_remove_tmp_dir() _a : int = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(_a , '''argv''' , _a ): run_ta_mlm_flax.main() _a : List[Any] = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def __lowercase ( self ) -> Optional[Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _a : Any = 7 if get_gpu_count() > 1 else 2 _a : List[Any] = self.get_auto_remove_tmp_dir() _a : List[Any] = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(_a , '''argv''' , _a ): run_flax_ner.main() _a : Dict = get_results(_a ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def __lowercase ( self ) -> Any: _a : Optional[int] = self.get_auto_remove_tmp_dir() _a : Union[str, Any] = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(_a , '''argv''' , _a ): run_qa.main() _a : Any = get_results(_a ) self.assertGreaterEqual(result['''eval_f1'''] , 3_0 ) self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
15
0
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class UpperCAmelCase_ ( __lowercase ):
    """simple docstring"""

    def __init__( self , _a , _a = None , _a = None , _a = True , _a = None , _a = False , _a = None , _a = True , _a = "arrow" , **_a , ) -> Union[str, Any]:
        super().__init__(
            split=_a , features=_a , cache_dir=_a , keep_in_memory=_a , streaming=_a , **_a , )
        _a : Optional[int] = load_from_cache_file
        _a : List[Any] = file_format
        _a : Optional[int] = Spark(
            df=_a , features=_a , cache_dir=_a , working_dir=_a , **_a , )

    def __lowercase ( self ) -> Optional[Any]:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        _a : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=_a , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
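# Hypothetical usage of the reader above through the public Dataset.from_spark
# entry point (available in recent `datasets` releases); the local Spark
# session and toy DataFrame are illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])

ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset
print(ds[0])  # e.g. {'text': 'hello'}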
357
import argparse import os import re import packaging.version a__ = '''examples/''' a__ = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } a__ = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } a__ = '''README.md''' def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int: """simple docstring""" with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Tuple = f.read() _a , _a : str = REPLACE_PATTERNS[pattern] _a : List[str] = replace.replace('''VERSION''' ,__a ) _a : List[Any] = re_pattern.sub(__a ,__a ) with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.write(__a ) def __UpperCAmelCase ( __a : Any ) -> List[Any]: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' ) def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a ,__a ,__a ) if not patch: update_version_in_examples(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" _a : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' _a : str = '''1. Want to contribute a new model?''' with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: _a : Optional[int] = f.readlines() # Find the start of the list. _a : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _a : List[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): _a : Tuple = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,) index += 1 with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f: f.writelines(__a ) def __UpperCAmelCase ( ) -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] ,'''r''' ) as f: _a : Optional[Any] = f.read() _a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def __UpperCAmelCase ( __a : Dict=False ) -> str: """simple docstring""" _a : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: _a : List[Any] = default_version.base_version elif patch: _a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _a : Dict = input(F"""Which version are you releasing? 
[{default_version}]""" ) if len(__a ) == 0: _a : int = default_version print(F"""Updating version to {version}.""" ) global_version_update(__a ,patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" _a : str = get_version() _a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _a : List[Any] = current_version.base_version # Check with the user we got that right. _a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(__a ) == 0: _a : List[str] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') a__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
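# A minimal reproduction of the regex version bump performed by
# update_version_in_file above; the pattern matches the 'init' entry in
# REPLACE_PATTERNS and the sample text is illustrative.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
text = '__version__ = "4.30.0.dev0"\n'

print(pattern.sub('__version__ = "4.30.0"', text))  # __version__ = "4.30.0"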
15
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def __UpperCAmelCase ( __a : Tuple ) -> Optional[int]: """simple docstring""" _a : List[Any] = SwinvaConfig() _a : Optional[Any] = swinva_name.split('''_''' ) _a : Union[str, Any] = name_split[1] if "to" in name_split[3]: _a : List[str] = int(name_split[3][-3:] ) else: _a : Dict = int(name_split[3] ) if "to" in name_split[2]: _a : List[Any] = int(name_split[2][-2:] ) else: _a : Optional[int] = int(name_split[2][6:] ) if model_size == "tiny": _a : Tuple = 96 _a : Optional[Any] = (2, 2, 6, 2) _a : Any = (3, 6, 12, 24) elif model_size == "small": _a : Dict = 96 _a : Dict = (2, 2, 18, 2) _a : Optional[Any] = (3, 6, 12, 24) elif model_size == "base": _a : Dict = 128 _a : str = (2, 2, 18, 2) _a : Union[str, Any] = (4, 8, 16, 32) else: _a : Dict = 192 _a : List[str] = (2, 2, 18, 2) _a : Any = (6, 12, 24, 48) if "to" in swinva_name: _a : Optional[Any] = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): _a : int = 21_841 _a : str = '''huggingface/label-files''' _a : int = '''imagenet-22k-id2label.json''' _a : Any = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) ) _a : Any = {int(__a ): v for k, v in idalabel.items()} _a : str = idalabel _a : Any = {v: k for k, v in idalabel.items()} else: _a : Optional[Any] = 1_000 _a : str = '''huggingface/label-files''' _a : Optional[int] = '''imagenet-1k-id2label.json''' _a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) ) _a : Any = {int(__a ): v for k, v in idalabel.items()} _a : int = idalabel _a : Dict = {v: k for k, v in idalabel.items()} _a : Dict = img_size _a : Any = num_classes _a : str = embed_dim _a : Dict = depths _a : Optional[Any] = num_heads _a : Optional[Any] = window_size return config def __UpperCAmelCase ( __a : Any ) -> str: """simple docstring""" if "patch_embed.proj" in name: _a : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _a : Tuple = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' ) if "layers" in name: _a : Union[str, Any] = '''encoder.''' + name if "attn.proj" in name: _a : int = name.replace('''attn.proj''' ,'''attention.output.dense''' ) if "attn" in name: _a : int = name.replace('''attn''' ,'''attention.self''' ) if "norm1" in name: _a : Any = name.replace('''norm1''' ,'''layernorm_before''' ) if "norm2" in name: _a : Optional[Any] = name.replace('''norm2''' ,'''layernorm_after''' ) if "mlp.fc1" in name: _a : Any = name.replace('''mlp.fc1''' ,'''intermediate.dense''' ) if "mlp.fc2" in name: _a : str = name.replace('''mlp.fc2''' ,'''output.dense''' ) if "q_bias" in name: _a : Dict = name.replace('''q_bias''' ,'''query.bias''' ) if "k_bias" in name: _a : List[Any] = name.replace('''k_bias''' ,'''key.bias''' ) if "v_bias" in name: _a : Dict = name.replace('''v_bias''' ,'''value.bias''' ) if "cpb_mlp" in name: _a : List[Any] = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' ) if name == "norm.weight": _a : str = '''layernorm.weight''' if name == "norm.bias": _a : Optional[Any] = '''layernorm.bias''' if "head" in name: _a : Any = name.replace('''head''' ,'''classifier''' ) else: _a : Optional[int] = '''swinv2.''' + name return name def __UpperCAmelCase ( __a : Tuple ,__a : Optional[int] ) -> 
Any: """simple docstring""" for key in orig_state_dict.copy().keys(): _a : Any = orig_state_dict.pop(__a ) if "mask" in key: continue elif "qkv" in key: _a : List[Any] = key.split('''.''' ) _a : Union[str, Any] = int(key_split[1] ) _a : Optional[int] = int(key_split[3] ) _a : List[str] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _a : Any = val[:dim, :] _a : Tuple = val[dim : dim * 2, :] _a : Optional[Any] = val[-dim:, :] else: _a : Optional[int] = val[:dim] _a : List[str] = val[ dim : dim * 2 ] _a : Any = val[-dim:] else: _a : List[str] = val return orig_state_dict def __UpperCAmelCase ( __a : List[Any] ,__a : Dict ) -> Any: """simple docstring""" _a : Any = timm.create_model(__a ,pretrained=__a ) timm_model.eval() _a : Optional[int] = get_swinva_config(__a ) _a : str = SwinvaForImageClassification(__a ) model.eval() _a : Union[str, Any] = convert_state_dict(timm_model.state_dict() ,__a ) model.load_state_dict(__a ) _a : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _a : Dict = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) ) _a : Tuple = Image.open(requests.get(__a ,stream=__a ).raw ) _a : Dict = image_processor(images=__a ,return_tensors='''pt''' ) _a : Any = timm_model(inputs['''pixel_values'''] ) _a : List[str] = model(**__a ).logits assert torch.allclose(__a ,__a ,atol=1E-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) model.push_to_hub( repo_path_or_name=Path(__a ,__a ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a__ = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
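# A small sketch of the fused-qkv split in convert_state_dict above: timm
# stores query/key/value as one (3*dim, dim) matrix that is cut into three
# (dim, dim) blocks. dim here is illustrative.
import torch

dim = 96
qkv_weight = torch.randn(3 * dim, dim)

q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
print(q.shape, k.shape, v.shape)  # three torch.Size([96, 96]) blocks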
358
def __UpperCAmelCase ( __a : int ) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(__a ,__a ):
        return 0
    elif n == 2:
        return 1
    else:
        _a : Any = [0, 1]
        for i in range(2 ,n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]


def __UpperCAmelCase ( __a : int ) -> int:
    """simple docstring"""
    _a : Any = 0
    _a : Dict = 2
    while digits < n:
        index += 1
        _a : Dict = len(str(fibonacci(__a ) ) )
    return index


def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
    """simple docstring"""
    return fibonacci_digits_index(__a )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
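# A quick sanity check of the digit-index search above (Project Euler 25),
# written with readable names rather than reusing the obfuscated helpers.
def fib(n):
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b


index, digits = 2, 1
while digits < 3:
    index += 1
    digits = len(str(fib(index)))
print(index)  # 12: F(12) = 144 is the first 3-digit Fibonacci number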
15
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
359
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record a__ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' a__ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' a__ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', 
\'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]: """simple docstring""" return float((preds == labels).mean() ) def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]: """simple docstring""" _a : List[str] = simple_accuracy(__a ,__a ) _a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) ) return { "accuracy": acc, "f1": fa, } def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]: """simple docstring""" _a : Union[str, Any] = {} for id_pred, label in zip(__a ,__a ): _a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" _a : Optional[Any] = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: _a : str = [(pred, label)] _a , _a : Any = [], [] for question, preds_labels in question_map.items(): _a , _a : Any = zip(*__a ) _a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' ) fas.append(__a ) _a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) ) ems.append(__a ) _a : List[str] = float(sum(__a ) / len(__a ) ) _a : str = sum(__a ) / len(__a ) _a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> List[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __lowercase ( self ) -> Any: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __lowercase ( self , _a , _a ) -> Optional[Any]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_a , _a )} elif self.config_name == "cb": return acc_and_fa(_a , _a , fa_avg='''macro''' ) elif self.config_name == "record": _a : Any = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for 
ans in ref['''answers''']]} for ref in references ] } ] _a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(_a , _a )[0] elif self.config_name == "multirc": return evaluate_multirc(_a , _a ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_a , _a )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
15
0
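To make the per-question grouping in evaluate_multirc above concrete, here is a small worked sketch with toy data (two questions, differing answer counts); it mirrors the metric's computation of f1_m (per-question macro-F1, averaged) and exact_match, but the data is invented for illustration, not taken from the metric's test suite:

from sklearn.metrics import f1_score

# (prediction, label) pairs, grouped by question id.
question_map = {
    "p0-q0": [(1, 1), (0, 0)],          # fully correct question
    "p0-q1": [(1, 1), (0, 0), (1, 0)],  # one false positive
}

f1s, exact_matches = [], []
for preds_labels in question_map.values():
    preds, labels = zip(*preds_labels)
    f1s.append(f1_score(y_true=labels, y_pred=preds, average="macro"))
    exact_matches.append(int(all(p == l for p, l in preds_labels)))

f1_m = sum(f1s) / len(f1s)                    # mean of per-question macro-F1
em = sum(exact_matches) / len(exact_matches)  # fraction of fully-correct questions
print(f1_m, em)  # 0.833..., 0.5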
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a__ = logging.get_logger(__name__) # pylint: disable=invalid-name a__ = ''' Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)["depth"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline("depth-estimation") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to("cuda") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> img = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") >>> prompt = "A robot, 4k photo" >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" >>> generator = torch.Generator(device="cuda").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save("robot_cat.png") ``` ''' def __UpperCAmelCase ( __a : Optional[int] ,__a : List[Any] ,__a : Union[str, Any]=8 ) -> Tuple: """simple docstring""" _a : Union[str, Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _a : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __init__( self , _a , _a , _a , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( unet=_a , scheduler=_a , movq=_a , ) _a : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowercase ( self , _a , _a , _a , _a , _a , _a ) -> List[str]: """simple docstring""" if latents is None: _a : List[Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) _a : Union[str, Any] = latents.to(_a ) _a : Dict = latents * scheduler.init_noise_sigma return latents def __lowercase ( self , _a=0 ) -> List[str]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _a : List[str] = torch.device(F"""cuda:{gpu_id}""" ) _a : Tuple = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) def __lowercase ( self , _a=0 ) -> List[Any]: """simple docstring""" if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) _a : int = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=_a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _a : List[Any] = None for cpu_offloaded_model in [self.unet, self.movq]: _a : int = cpu_offload_with_hook(_a , _a , prev_module_hook=_a ) # We'll offload the last model manually. 
_a : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowercase ( self ) -> Union[str, Any]: """simple docstring""" if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_a , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a , _a , _a = 5_1_2 , _a = 5_1_2 , _a = 1_0_0 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ) -> Optional[int]: """simple docstring""" _a : List[Any] = self._execution_device _a : int = guidance_scale > 1.0 if isinstance(_a , _a ): _a : Optional[int] = torch.cat(_a , dim=0 ) if isinstance(_a , _a ): _a : Union[str, Any] = torch.cat(_a , dim=0 ) if isinstance(_a , _a ): _a : Optional[Any] = torch.cat(_a , dim=0 ) _a : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: _a : Optional[Any] = image_embeds.repeat_interleave(_a , dim=0 ) _a : Any = negative_image_embeds.repeat_interleave(_a , dim=0 ) _a : Union[str, Any] = hint.repeat_interleave(_a , dim=0 ) _a : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a ) _a : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_a ) self.scheduler.set_timesteps(_a , device=_a ) _a : Optional[Any] = self.scheduler.timesteps _a : str = self.movq.config.latent_channels _a : Union[str, Any] = downscale_height_and_width(_a , _a , self.movq_scale_factor ) # create initial latent _a : Optional[int] = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance _a : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _a : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint} _a : str = self.unet( sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0] if do_classifier_free_guidance: _a : List[str] = noise_pred.split(latents.shape[1] , dim=1 ) _a : Any = noise_pred.chunk(2 ) _a : Optional[int] = variance_pred.chunk(2 ) _a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _a : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _a : str = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _a : str = self.scheduler.step( _a , _a , _a , generator=_a , )[0] # post-processing _a : Optional[Any] = self.movq.decode(_a , force_not_quantize=_a )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: _a : Optional[int] = image * 0.5 + 0.5 _a : Tuple = image.clamp(0 , 1 ) _a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _a : Tuple = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
360
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Estimate the dominant eigenvalue and eigenvector of a matrix by power iteration."""
    # Ensure proper dimensionality and matching shapes.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation: get eigenvalues and eigenvectors using built-in
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Compare absolute values element-wise, as eigenvectors are only
        # unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
15
0
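A quick usage sketch for power_iteration above, assuming the cleaned-up signature; the starting vector only needs a nonzero component along the dominant eigenvector:

import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])  # symmetric, eigenvalues 1 and 3
start = np.array([1.0, 0.0])

eigen_value, eigen_vector = power_iteration(matrix, start)
print(round(eigen_value, 6))  # ~3.0, the dominant eigenvalue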
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
361
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
15
0
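The upper bound of 1,000,000 in solution above is justified by the digit arithmetic: a d-digit number's fifth-power digit sum is at most d * 9**5, and from d = 7 onward that sum has fewer than d digits. A quick check, as a sketch:

# Largest possible digit fifth-power sum for a d-digit number is d * 9**5.
for d in range(1, 9):
    bound = d * 9**5
    print(d, bound, len(str(bound)))
# At d = 7 the bound is 413343, a 6-digit number, so no number with 7 or
# more digits can equal its own digit fifth-power sum; 6 digits is the ceiling.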
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ = logging.get_logger(__name__) # General docstring a__ = '''RegNetConfig''' # Base docstring a__ = '''facebook/regnet-y-040''' a__ = [1, 1088, 7, 7] # Image classification docstring a__ = '''facebook/regnet-y-040''' a__ = '''tabby, tabby cat''' a__ = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a = 3 , _a = 1 , _a = 1 , _a = "relu" , **_a , ) -> str: super().__init__(**_a ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _a : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _a : Tuple = tf.keras.layers.ConvaD( filters=_a , kernel_size=_a , strides=_a , padding='''VALID''' , groups=_a , use_bias=_a , name='''convolution''' , ) _a : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _a : List[str] = ACTaFN[activation] if activation is not None else tf.identity def __lowercase ( self , _a ) -> List[Any]: _a : List[Any] = self.convolution(self.padding(_a ) ) _a : Tuple = self.normalization(_a ) _a : Union[str, Any] = self.activation(_a ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , **_a ) -> Tuple: super().__init__(**_a ) _a : List[str] = config.num_channels _a : Dict = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def __lowercase ( self , _a ) -> Union[str, Any]: _a : List[str] = shape_list(_a )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _a : Dict = tf.transpose(_a , perm=(0, 2, 3, 1) ) _a : List[str] = self.embedder(_a ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a = 2 , **_a ) -> int: super().__init__(**_a ) _a : Optional[Any] = tf.keras.layers.ConvaD( filters=_a , kernel_size=1 , strides=_a , use_bias=_a , name='''convolution''' ) _a : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def __lowercase ( self , _a , _a = False ) -> tf.Tensor: return self.normalization(self.convolution(_a ) , training=_a ) class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a , **_a ) -> int: super().__init__(**_a ) _a : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_a , name='''pooler''' ) _a : Optional[int] = [ tf.keras.layers.ConvaD(filters=_a , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_a , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def __lowercase ( self , _a ) -> List[Any]: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _a : str = self.pooler(_a ) for layer_module in self.attention: _a : Dict = layer_module(_a ) _a : Optional[Any] = hidden_state * pooled return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a , _a , _a = 1 , **_a ) -> Union[str, Any]: super().__init__(**_a ) _a : Union[str, Any] = in_channels != out_channels or stride != 1 _a : str = max(1 , out_channels // config.groups_width ) _a : Tuple = ( TFRegNetShortCut(_a , stride=_a , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_a : Tuple = [ TFRegNetConvLayer(_a , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _a , stride=_a , groups=_a , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_a , kernel_size=1 , activation=_a , name='''layer.2''' ), ] _a : int = ACTaFN[config.hidden_act] def __lowercase ( self , _a ) -> Any: _a : Tuple = hidden_state for layer_module in self.layers: _a : List[str] = layer_module(_a ) _a : Optional[int] = self.shortcut(_a ) hidden_state += residual _a : Union[str, Any] = self.activation(_a ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a , _a , _a = 1 , **_a ) -> List[str]: super().__init__(**_a ) _a : List[str] = in_channels != out_channels or stride != 1 _a : Union[str, Any] = max(1 , out_channels // config.groups_width ) _a : int = ( TFRegNetShortCut(_a , stride=_a , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _a : Any = [ TFRegNetConvLayer(_a , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _a , stride=_a , groups=_a , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_a , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_a , kernel_size=1 , activation=_a , name='''layer.3''' ), ] _a : Union[str, Any] = ACTaFN[config.hidden_act] def __lowercase ( self , _a ) -> List[str]: _a : Tuple = hidden_state for layer_module in self.layers: _a : str = layer_module(_a ) _a : int = self.shortcut(_a ) hidden_state += residual _a : List[Any] = self.activation(_a ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , _a , _a , _a = 2 , _a = 2 , **_a ) -> Union[str, Any]: super().__init__(**_a ) _a : Union[str, Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _a : Any = [ # downsampling is done in the first layer with stride of 2 layer(_a , _a , _a , stride=_a , name='''layers.0''' ), *[layer(_a , _a , _a , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def __lowercase ( self , _a ) -> Any: for layer_module in self.layers: _a : Tuple = layer_module(_a ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _a , **_a ) -> List[str]: super().__init__(**_a ) _a : int = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _a : Any = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_a , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_a , _a , _a , depth=_a , name=F"""stages.{i+1}""" ) ) def __lowercase ( self , _a , _a = False , _a = True ) -> TFBaseModelOutputWithNoAttention: _a : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _a : Optional[Any] = hidden_states + (hidden_state,) _a : Union[str, Any] = stage_module(_a ) if output_hidden_states: _a : Tuple = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a ) 
@keras_serializable class UpperCAmelCase_ ( tf.keras.layers.Layer ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = RegNetConfig def __init__( self , _a , **_a ) -> Optional[int]: super().__init__(**_a ) _a : List[Any] = config _a : Union[str, Any] = TFRegNetEmbeddings(_a , name='''embedder''' ) _a : List[str] = TFRegNetEncoder(_a , name='''encoder''' ) _a : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_a , name='''pooler''' ) @unpack_inputs def __lowercase ( self , _a , _a = None , _a = None , _a = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: _a : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _a : Union[str, Any] = self.embedder(_a , training=_a ) _a : Any = self.encoder( _a , output_hidden_states=_a , return_dict=_a , training=_a ) _a : Tuple = encoder_outputs[0] _a : int = self.pooler(_a ) # Change to NCHW output format have uniformity in the modules _a : Union[str, Any] = tf.transpose(_a , perm=(0, 3, 1, 2) ) _a : Optional[int] = tf.transpose(_a , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _a : int = tuple([tf.transpose(_a , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_a , pooler_output=_a , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = RegNetConfig UpperCAmelCase__ : int = "regnet" UpperCAmelCase__ : Dict = "pixel_values" @property def __lowercase ( self ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, __lowercase , ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __init__( self , _a , *_a , **_a ) -> Any: super().__init__(_a , *_a , **_a ) _a : Optional[Any] = TFRegNetMainLayer(_a , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowercase ( self , _a , _a = None , _a = None , _a=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: _a : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict _a : Tuple = self.regnet( pixel_values=_a , output_hidden_states=_a , return_dict=_a , training=_a , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowercase , ) class UpperCAmelCase_ ( __lowercase , __lowercase ): """simple docstring""" def __init__( self , _a , *_a , **_a ) -> Tuple: super().__init__(_a , *_a , **_a ) _a : str = config.num_labels _a : int = TFRegNetMainLayer(_a , name='''regnet''' ) # classification head _a : Optional[int] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowercase ( self , _a = None , _a = None , _a = None , _a = None , _a=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: _a : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _a : List[str] = self.regnet( _a , output_hidden_states=_a , return_dict=_a , training=_a ) _a : Dict = outputs.pooler_output if return_dict else outputs[1] _a : Dict = self.classifier[0](_a ) _a : List[str] = self.classifier[1](_a ) _a : List[Any] = None if labels is None else self.hf_compute_loss(labels=_a , logits=_a ) if not return_dict: _a : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
362
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive fast exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation base ^^ height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
15
0
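Python's built-in three-argument pow performs modular exponentiation natively, which gives a one-line cross-check of the _modexpt recursion above (exponents start at 1, since the recursion bottoms out at exponent == 1):

for exponent in range(1, 50):
    assert _modexpt(1777, exponent, 10**8) == pow(1777, exponent, 10**8)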
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: best revenue from cutting a rod of length n."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) rod cutting."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
363
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging a__ = '''\ ''' a__ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' a__ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def __lowercase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _a : List[str] = '''cuda''' else: _a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' _a : Dict = AutoModelForCausalLM.from_pretrained(_a ) _a : List[Any] = model.to(_a ) _a : List[str] = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _a : str = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _a : List[Any] = model.config.max_length - 1 else: _a : List[str] = model.config.max_length _a : Union[str, Any] = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a ) _a : List[Any] = encodings['''input_ids'''] _a : int = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _a : Optional[int] = [] _a : Dict = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): _a : Dict = min(start_index + batch_size , len(_a ) ) _a : Union[str, Any] = encoded_texts[start_index:end_index] _a : int = attn_masks[start_index:end_index] if add_start_token: _a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) _a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _a : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) _a : Dict = encoded_batch with torch.no_grad(): _a : Any = model(_a , attention_mask=_a ).logits _a : List[str] = out_logits[..., :-1, :].contiguous() _a : Union[str, Any] = labels[..., 1:].contiguous() _a : Optional[int] = attn_mask[..., 1:].contiguous() _a : Union[str, Any] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
15
0
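The three rod-cutting variants above trade time for simplicity: the naive recursion is exponential, while both dynamic-programming versions run in O(n^2). A small timing sketch, assuming the cleaned-up function names (the price table is the classic CLRS example, chosen only for illustration):

import timeit

prices = [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]
n = len(prices)

for fn in (naive_cut_rod_recursive, top_down_cut_rod, bottom_up_cut_rod):
    seconds = timeit.timeit(lambda: fn(n, prices), number=100)
    print(f"{fn.__name__}: {seconds:.4f}s")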
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ = logging.get_logger(__name__) a__ = { '''huggingface/time-series-transformer-tourism-monthly''': ( '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json''' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Tuple = "time_series_transformer" UpperCAmelCase__ : Union[str, Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 3_2 , _a = 3_2 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 6_4 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 1_0_0 , _a = 0.02 , _a=True , **_a , ) -> Optional[int]: # time series specific configuration _a : Optional[Any] = prediction_length _a : Dict = context_length or prediction_length _a : Optional[Any] = distribution_output _a : Dict = loss _a : Dict = input_size _a : Dict = num_time_features _a : Any = lags_sequence _a : Optional[Any] = scaling _a : Any = num_dynamic_real_features _a : Any = num_static_real_features _a : Optional[int] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) _a : List[Any] = cardinality else: _a : Optional[int] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) _a : str = embedding_dimension else: _a : Optional[int] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _a : Union[str, Any] = num_parallel_samples # Transformer architecture configuration _a : Any = input_size * len(_a ) + self._number_of_features _a : int = d_model _a : int = encoder_attention_heads _a : Dict = decoder_attention_heads _a : Optional[int] = encoder_ffn_dim _a : Union[str, Any] = decoder_ffn_dim _a : Dict = encoder_layers _a : Tuple = decoder_layers _a : List[Any] = dropout _a : Tuple = attention_dropout _a : Any = activation_dropout _a : int = encoder_layerdrop _a : int = decoder_layerdrop _a : Tuple = activation_function _a : Optional[Any] = init_std _a : int = use_cache super().__init__(is_encoder_decoder=_a , **_a ) @property def __lowercase ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
364
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
0
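In the TimeSeriesTransformerConfig row above, the encoder input dimension is derived rather than passed in: input_size * len(lags_sequence) lagged values plus the extra features counted by the _number_of_features property. A sketch of that arithmetic using only the defaults visible in the signature (plain Python, mirroring the formula shown; no transformers import needed):

# Defaults from the signature above: input_size = 1, lags_sequence = [1..7],
# and zero embedding, dynamic-real, static-real, and time features.
input_size, lags_sequence = 1, [1, 2, 3, 4, 5, 6, 7]
# _number_of_features = sum(embedding_dimension) + dynamic reals + time
# features + static reals + input_size * 2 (the log1p(abs(loc)) and
# log(scale) features appended by the scaler).
number_of_features = 0 + 0 + 0 + 0 + input_size * 2
d_input = input_size * len(lags_sequence) + number_of_features
print(d_input)  # 9 — the feature dimension fed to the transformer layers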
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : int = StableDiffusionLDMaDPipeline UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS def __lowercase ( self ) -> List[str]: torch.manual_seed(0 ) _a : Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) _a : Optional[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , ) torch.manual_seed(0 ) _a : str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _a : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _a : List[str] = CLIPTextModel(_a ) _a : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _a : Tuple = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowercase ( self , _a , _a=0 ) -> Optional[Any]: if str(_a ).startswith('''mps''' ): _a : Tuple = torch.manual_seed(_a ) else: _a : str = torch.Generator(device=_a ).manual_seed(_a ) _a : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowercase ( self ) -> int: _a : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator _a : Union[str, Any] = self.get_dummy_components() _a : str = StableDiffusionLDMaDPipeline(**_a ) _a : int = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : Dict = self.get_dummy_inputs(_a ) _a : Any = ldmad_pipe(**_a ) _a : Union[str, Any] = output.rgb, output.depth _a : Tuple = rgb[0, -3:, -3:, -1] _a : int = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) _a : int = np.array( [0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] ) _a : Any = np.array([103.4_6727, 85.81_2004, 87.84_9236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def __lowercase ( self ) -> List[str]: _a : Optional[int] = 
self.get_dummy_components() _a : Optional[Any] = StableDiffusionLDMaDPipeline(**_a ) _a : int = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : Union[str, Any] = self.get_dummy_inputs(_a ) _a : Union[str, Any] = 3 * [inputs['''prompt''']] # forward _a : List[Any] = ldmad_pipe(**_a ) _a : str = output.rgb, output.depth _a : str = rgb_slice_a[0, -3:, -3:, -1] _a : Optional[Any] = depth_slice_a[0, -3:, -1] _a : Any = self.get_dummy_inputs(_a ) _a : List[Any] = 3 * [inputs.pop('''prompt''' )] _a : int = ldmad_pipe.tokenizer( _a , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , ) _a : Tuple = text_inputs['''input_ids'''].to(_a ) _a : Union[str, Any] = ldmad_pipe.text_encoder(_a )[0] _a : Optional[int] = prompt_embeds # forward _a : Dict = ldmad_pipe(**_a ) _a : List[str] = output.rgb, output.depth _a : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1] _a : int = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def __lowercase ( self ) -> Dict: _a : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator _a : List[str] = self.get_dummy_components() _a : Any = PNDMScheduler(skip_prk_steps=_a ) _a : Any = StableDiffusionLDMaDPipeline(**_a ) _a : Any = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : Tuple = self.get_dummy_inputs(_a ) _a : int = '''french fries''' _a : Tuple = ldmad_pipe(**_a , negative_prompt=_a ) _a : Optional[Any] = output.rgb, output.depth _a : List[Any] = rgb[0, -3:, -3:, -1] _a : Tuple = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) _a : Any = np.array( [0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] ) _a : int = np.array([107.8_4738, 84.6_2802, 89.96_2135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Tuple: _a : int = torch.Generator(device=_a ).manual_seed(_a ) _a : Optional[int] = np.random.RandomState(_a ).standard_normal((1, 4, 6_4, 6_4) ) _a : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a ) _a : int = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __lowercase ( self ) -> Optional[Any]: _a : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ) _a : Optional[Any] = ldmad_pipe.to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : int = self.get_inputs(_a ) _a : Optional[int] = ldmad_pipe(**_a ) _a : Any = output.rgb, output.depth _a : Tuple = rgb[0, -3:, -3:, -1].flatten() _a : int = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2) _a : int = np.array( [0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] ) _a : Dict = np.array( [0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 
0.755_1478, 0.645_9706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Optional[int]: _a : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a ) _a : Dict = np.random.RandomState(_a ).standard_normal((1, 4, 6_4, 6_4) ) _a : Optional[Any] = torch.from_numpy(_a ).to(device=_a , dtype=_a ) _a : str = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 5_0, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __lowercase ( self ) -> Tuple: _a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : Any = self.get_inputs(_a ) _a : List[str] = ldmad_pipe(**_a ) _a : Any = output.rgb, output.depth _a : str = 0.49_5586 _a : Union[str, Any] = 0.3379_5515 _a : Any = 112.4_8518 _a : List[Any] = 98.48_9746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def __lowercase ( self ) -> int: _a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_a ) ldmad_pipe.set_progress_bar_config(disable=_a ) _a : List[str] = self.get_inputs(_a ) _a : List[Any] = ldmad_pipe(**_a ) _a : Union[str, Any] = output.rgb, output.depth _a : int = 0.419_4127 _a : str = 0.3537_5586 _a : str = 0.563_8502 _a : Tuple = 0.3468_6103 assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
365
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a__ = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } a__ = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) a__ = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) a__ = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' a__ = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' a__ = '''''' a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' a__ = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]: """simple docstring""" assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): _a : List[Any] = ReadMe.from_string(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple: """simple docstring""" with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple: """simple docstring""" ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a ) @pytest.mark.parametrize( '''readme_md, expected_dict''' ,[ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] ,) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Tuple = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert 
out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] ,) def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : Optional[int] = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): _a : Any = ReadMe.from_readme(__a ,__a ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' ,[ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : Optional[Any] = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) _a : str = expected_error.format(path=__a ) with pytest.raises(__a ,match=re.escape(__a ) ): ReadMe.from_readme(__a ,__a ) @pytest.mark.parametrize( '''readme_md,''' ,[ (README_MULTIPLE_SAME_HEADING_1), ] ,) def __UpperCAmelCase ( __a : Optional[Any] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _a : int = Path(__a ) / '''README.md''' with open(__a ,'''w+''' ) as readme_file: readme_file.write(__a ) ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
15
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'''{solution() = }''')
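The search above normalises every candidate fraction by dividing numerator and denominator by their gcd before deduplicating. A small worked example of that reduction for 1/2 + 1/3 + 1/6, using only the standard library:

from math import gcd

x_num, x_den = 1, 2   # 1/2
y_num, y_den = 1, 3   # 1/3
z_num, z_den = 1, 6   # 1/6

top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top, bottom)                 # 36/36 reduces to 1/1
print(top // hcf, bottom // hcf)       # 1 1, i.e. 1/2 + 1/3 + 1/6 == 1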
366
from __future__ import annotations


def __UpperCAmelCase(nums: list) -> float:
    """simple docstring"""
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class UpperCAmelCase_ ( __lowercase ): """simple docstring""" @slow @require_torch def __lowercase ( self ) -> Dict: _a : Optional[int] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) _a : str = BertTokenizer.from_pretrained('''bert-base-uncased''' ) _a : str = bertabert.config.encoder.vocab_size _a : int = tokenizer.sep_token_id _a : List[Any] = tokenizer.cls_token_id _a : Tuple = 1_2_8 _a : Union[str, Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) _a : Optional[int] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) _a : Union[str, Any] = train_dataset.select(range(3_2 ) ) _a : List[Any] = val_dataset.select(range(1_6 ) ) _a : Tuple = 4 def _map_to_encoder_decoder_inputs(_a ): # Tokenizer will automatically set [BOS] <text> [EOS] _a : List[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_a , max_length=5_1_2 ) _a : List[Any] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_a , max_length=1_2_8 ) _a : Any = inputs.input_ids _a : List[str] = inputs.attention_mask _a : List[Any] = outputs.input_ids _a : Any = outputs.input_ids.copy() _a : Tuple = [ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] _a : Optional[int] = outputs.attention_mask assert all(len(_a ) == 5_1_2 for x in inputs.input_ids ) assert all(len(_a ) == 1_2_8 for x in outputs.input_ids ) return batch def _compute_metrics(_a ): _a : int = pred.label_ids _a : int = pred.predictions # all unnecessary tokens are removed _a : int = tokenizer.batch_decode(_a , skip_special_tokens=_a ) _a : Union[str, Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a ) _a : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a ) return {"accuracy": accuracy} # map train dataset _a : List[Any] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset _a : List[str] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) _a : Union[str, Any] = self.get_auto_remove_tmp_dir() _a : List[Any] = SeqaSeqTrainingArguments( output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy='''steps''' , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _a : str = SeqaSeqTrainer( model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , ) # start training trainer.train()
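One detail worth noting in the preprocessing above is that padded label positions are replaced with -100, the value the cross-entropy loss ignores. A standalone sketch of that masking, assuming a pad token id of 0 for illustration:

pad_token_id = 0   # assumed pad id, illustrative only
labels = [[5, 8, 2, 0, 0], [7, 3, 0, 0, 0]]

masked = [
    [-100 if token == pad_token_id else token for token in seq]
    for seq in labels
]
print(masked)   # [[5, 8, 2, -100, -100], [7, 3, -100, -100, -100]]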
367
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']

OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
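At its core the conversion above is a key rename inside a checkpoint's state dict. A dictionary-level sketch of the same operation (a plain dict stands in for the torch checkpoint):

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"

state_dict = {"lm_head.decoder.weight": "tensor-placeholder", "other": 1}
state_dict[NEW_KEY] = state_dict.pop(OLD_KEY)   # move the value under the new name
print(sorted(state_dict))                        # ['lm_head.weight', 'other']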
15
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ = logging.get_logger(__name__) a__ = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : str = "owlvit_text_model" def __init__( self , _a=4_9_4_0_8 , _a=5_1_2 , _a=2_0_4_8 , _a=1_2 , _a=8 , _a=1_6 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , _a=0 , _a=4_9_4_0_6 , _a=4_9_4_0_7 , **_a , ) -> Dict: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) _a : int = vocab_size _a : Any = hidden_size _a : Optional[Any] = intermediate_size _a : Optional[Any] = num_hidden_layers _a : Optional[int] = num_attention_heads _a : Tuple = max_position_embeddings _a : Any = hidden_act _a : str = layer_norm_eps _a : int = attention_dropout _a : Tuple = initializer_range _a : Optional[Any] = initializer_factor @classmethod def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _a : Tuple = cls.get_config_dict(_a , **_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": _a : List[Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[Any] = "owlvit_vision_model" def __init__( self , _a=7_6_8 , _a=3_0_7_2 , _a=1_2 , _a=1_2 , _a=3 , _a=7_6_8 , _a=3_2 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> str: super().__init__(**_a ) _a : Optional[int] = hidden_size _a : Union[str, Any] = intermediate_size _a : Dict = num_hidden_layers _a : Optional[Any] = num_attention_heads _a : str = num_channels _a : Optional[int] = image_size _a : Optional[Any] = patch_size _a : Optional[int] = hidden_act _a : Optional[int] = layer_norm_eps _a : int = attention_dropout _a : Optional[Any] = initializer_range _a : Any = initializer_factor @classmethod def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _a : Tuple = cls.get_config_dict(_a , **_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": _a : Optional[int] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[str] = "owlvit" UpperCAmelCase__ : Optional[int] = True def __init__( self , _a=None , _a=None , _a=5_1_2 , _a=2.6592 , _a=True , **_a , ) -> Tuple: super().__init__(**_a ) if text_config is None: _a : str = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: _a : List[Any] = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) _a : int = OwlViTTextConfig(**_a ) _a : Union[str, Any] = OwlViTVisionConfig(**_a ) _a : List[Any] = projection_dim _a : Optional[Any] = logit_scale_init_value _a : List[str] = return_dict _a : Union[str, Any] = 1.0 @classmethod def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig": cls._set_token_in_kwargs(_a ) _a : Optional[Any] = cls.get_config_dict(_a , **_a ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_a , **_a ) @classmethod def __lowercase ( cls , _a , _a , **_a ) -> Union[str, Any]: _a : Dict = {} _a : Tuple = text_config _a : Optional[Any] = vision_config return cls.from_dict(_a , **_a ) def __lowercase ( self ) -> List[str]: _a : Tuple = copy.deepcopy(self.__dict__ ) _a : Tuple = self.text_config.to_dict() _a : Any = self.vision_config.to_dict() _a : Tuple = self.__class__.model_type return output class UpperCAmelCase_ ( __lowercase ): """simple docstring""" @property def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def __lowercase ( self ) -> float: return 1e-4 def __lowercase ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Mapping[str, Any]: _a : int = super().generate_dummy_inputs( processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a ) _a : int = super().generate_dummy_inputs( processor.image_processor , batch_size=_a , framework=_a ) return {**text_input_dict, **image_input_dict} @property def __lowercase ( self ) -> int: return 1_4
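The composite config above mainly wires a text sub-config and a vision sub-config together, falling back to defaults when either dict is missing. A plain-Python sketch of that pattern; the class and field names here are illustrative, not the real API:

class SubConfig:
    def __init__(self, hidden_size=512, **kwargs):
        self.hidden_size = hidden_size

class CompositeConfig:
    def __init__(self, text_config=None, vision_config=None):
        # Fall back to defaults when a sub-config dict is not provided.
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

cfg = CompositeConfig(text_config={"hidden_size": 256})
print(cfg.text_config.hidden_size, cfg.vision_config.hidden_size)   # 256 512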
368
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ ( enum.Enum ): """simple docstring""" UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[Any] = 2 @add_end_docstrings(__lowercase ) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self , *_a , **_a ) -> List[str]: super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a : Dict = None if self.model.config.prefix is not None: _a : List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params ) _a : Optional[Any] = {**self._preprocess_params, **preprocess_params} _a : List[Any] = {**self._forward_params, **forward_params} def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]: _a : List[Any] = {} if prefix is not None: _a : Optional[Any] = prefix if prefix: _a : Dict = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Tuple = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _a : Dict = handle_long_generation preprocess_params.update(_a ) _a : Tuple = generate_kwargs _a : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a : Any = ReturnType.TENSORS if return_type is not None: _a : Any = return_type if clean_up_tokenization_spaces is not None: _a : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a : List[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowercase ( self , *_a , **_a ) -> Union[str, Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> List[str]: return super().__call__(_a , **_a ) def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]: _a : Optional[int] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) _a : Union[str, Any] = prompt_text if handle_long_generation == "hole": _a : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a : int = generate_kwargs['''max_new_tokens'''] else: _a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a : List[str] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a : List[Any] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a : List[str] = inputs['''attention_mask'''][:, -keep_length:] return inputs def __lowercase ( self , _a , **_a ) -> Optional[int]: _a : Any = model_inputs['''input_ids'''] _a : Optional[Any] = model_inputs.get('''attention_mask''' , _a ) # Allow empty prompts if input_ids.shape[1] == 0: _a : int = None _a : int = None _a : List[str] = 1 else: _a : List[Any] = input_ids.shape[0] _a : Union[str, Any] = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a : int = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a : Tuple = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a : Dict = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) _a : int = generated_sequence.shape[0] if self.framework == "pt": _a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int: _a : Tuple = model_outputs['''generated_sequence'''][0] _a : int = model_outputs['''input_ids'''] _a : Any = model_outputs['''prompt_text'''] _a : Any = generated_sequence.numpy().tolist() _a : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a : Optional[int] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a : str = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a : Union[str, Any] = 0 else: _a : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: _a : str = prompt_text + text[prompt_length:] else: _a : List[str] = text[prompt_length:] _a : Union[str, Any] = {'''generated_text''': all_text} records.append(_a ) return records
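The "hole" branch in the preprocessing above trims the prompt from the left so that prompt length plus the requested new tokens still fits the model's maximum length. A list-based sketch of that truncation (the real pipeline slices tensors; the numbers are illustrative):

model_max_length = 10          # assumed context window
new_tokens = 4
input_ids = list(range(12))    # prompt longer than the window allows

if len(input_ids) + new_tokens > model_max_length:
    keep_length = model_max_length - new_tokens
    input_ids = input_ids[-keep_length:]   # keep only the most recent tokens

print(input_ids)   # [6, 7, 8, 9, 10, 11]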
15
0
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols('''ct x y z''')


def beta(velocity: float) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')
    return velocity / c


def gamma(velocity: float) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """simple docstring"""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print('''Example of four vector: ''')
    print(f'''ct' = {four_vector[0]}''')
    print(f'''x' = {four_vector[1]}''')
    print(f'''y' = {four_vector[2]}''')
    print(f'''z' = {four_vector[3]}''')

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'''\n{numerical_vector}''')
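As a quick numerical check of the helpers above: at half the speed of light, beta is 0.5 and the Lorentz factor is 1/sqrt(1 - 0.25), roughly 1.1547. A self-contained sketch:

from math import sqrt

c = 299_792_458          # speed of light in m/s
velocity = c / 2

beta = velocity / c
gamma = 1 / sqrt(1 - beta ** 2)
print(beta, round(gamma, 4))   # 0.5 1.1547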
369
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''')
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''')

    parser.add_argument(
        '''--config_file''',
        default=None,
        help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have '''
            '''such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with 'huggingface'.'''
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""

    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''')


def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
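The command wiring above relies on argparse's set_defaults(func=...) trick so that whichever subcommand the user selects carries its own handler. A minimal standalone sketch of that dispatch pattern:

import argparse

def test_command(args):
    print(f"running test with config: {args.config_file}")

parser = argparse.ArgumentParser("tool")
subparsers = parser.add_subparsers()
test_parser = subparsers.add_parser("test")
test_parser.add_argument("--config_file", default=None)
test_parser.set_defaults(func=test_command)

args = parser.parse_args(["test", "--config_file", "cfg.yaml"])
args.func(args)   # dispatch to the handler chosen by the subcommand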
15
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
        '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTBigCodeForSequenceClassification''',
        '''GPTBigCodeForTokenClassification''',
        '''GPTBigCodeForCausalLM''',
        '''GPTBigCodeModel''',
        '''GPTBigCodePreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
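The _LazyModule indirection above defers heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea using the module-level __getattr__ hook from PEP 562; the attribute map here is illustrative, and the real implementation also handles TYPE_CHECKING and nested import structures:

import importlib

# Maps an exported attribute name to the module that actually provides it.
_lazy_attributes = {"dumps": "json"}

def __getattr__(name):
    # PEP 562: called only when normal module attribute lookup fails,
    # so the providing module is imported on first access.
    if name in _lazy_attributes:
        module = importlib.import_module(_lazy_attributes[name])
        return getattr(module, name)
    raise AttributeError(name)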
370
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = tempfile.mkdtemp() # fmt: off _a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _a : str = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __lowercase ( self , **_a ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> str: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self ) -> str: _a : List[str] = self.get_tokenizer() _a : Tuple = self.get_image_processor() _a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Dict: _a : List[str] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Any: _a : Dict = self.get_image_processor() _a : str = 
self.get_tokenizer() _a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[str] = self.prepare_image_inputs() _a : List[Any] = image_processor(_a , return_tensors='''np''' ) _a : Dict = processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.get_image_processor() _a : Dict = self.get_tokenizer() _a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Tuple = '''lower newer''' _a : int = processor(text=_a ) _a : str = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ) -> List[Any]: _a : Any = self.get_image_processor() _a : str = self.get_tokenizer() _a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[Any] = '''lower newer''' _a : Union[str, Any] = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : int = processor.batch_decode(_a ) _a : int = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __lowercase ( self ) -> List[Any]: _a : Tuple = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Optional[int] = '''lower newer''' _a : Dict = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
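The processor under test mostly delegates: text goes to the tokenizer, images to the image processor, and the two output dicts are merged. A toy sketch of that composition, with plain callables standing in for the real components:

def tokenizer(text):
    return {"input_ids": [ord(ch) for ch in text]}

def image_processor(images):
    return {"pixel_values": images}

def processor(text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    out = {}
    if text is not None:
        out.update(tokenizer(text))
    if images is not None:
        out.update(image_processor(images))
    return out

print(processor(text="hi", images=[[0.0]]).keys())   # dict_keys(['input_ids', 'pixel_values'])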
15
0
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : """simple docstring""" def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=9_9 , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]: _a : str = parent _a : Optional[Any] = batch_size _a : Tuple = seq_length _a : Union[str, Any] = is_training _a : Optional[Any] = use_input_mask _a : List[Any] = use_token_type_ids _a : Any = use_labels _a : Tuple = vocab_size _a : Tuple = hidden_size _a : Optional[int] = num_hidden_layers _a : List[Any] = num_attention_heads _a : List[Any] = intermediate_size _a : int = hidden_act _a : List[Any] = hidden_dropout_prob _a : str = attention_probs_dropout_prob _a : int = max_position_embeddings _a : List[Any] = type_vocab_size _a : int = type_sequence_label_size _a : str = initializer_range _a : Dict = num_labels _a : Union[str, Any] = num_choices _a : Any = scope def __lowercase ( self ) -> str: _a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Any = None if self.use_input_mask: _a : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _a : int = None if self.use_token_type_ids: _a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Tuple = None _a : Union[str, Any] = None _a : Any = None if self.use_labels: _a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a : Dict = ids_tensor([self.batch_size] , self.num_choices ) _a : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self ) -> str: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _a : Union[str, Any] = BioGptModel(config=_a ) model.to(_a ) model.eval() _a : Optional[Any] = model(_a , attention_mask=_a ) _a : List[Any] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple: _a : int = BioGptForCausalLM(config=_a ) model.to(_a ) model.eval() _a : Dict = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Union[str, Any]: _a : List[Any] = BioGptModel(config=_a ) model.to(_a ) model.eval() # create attention mask _a : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=_a ) _a : List[Any] = self.seq_length // 2 _a : Union[str, Any] = 0 # first forward pass _a : Optional[Any] = model(_a , attention_mask=_a ).to_tuple() # create hypothetical next token and extent to next_input_ids _a : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids _a : Tuple = ids_tensor((1,) , _a ).item() + 1 _a : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) _a : Dict = random_other_next_tokens # append to next input_ids and attn_mask _a : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) _a : List[str] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , ) # get two different outputs _a : Optional[int] = model(_a , attention_mask=_a )['''last_hidden_state'''] _a : Tuple = model(_a , past_key_values=_a , attention_mask=_a )['''last_hidden_state'''] # select random slice _a : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : Any = output_from_no_past[:, -1, random_slice_idx].detach() _a : int = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) ) def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Dict: _a : Optional[Any] = BioGptModel(config=_a ).to(_a ).eval() _a : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=_a ) # first forward pass _a : Union[str, Any] = model(_a , attention_mask=_a , use_cache=_a ) _a : str = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _a : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _a : Optional[Any] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _a : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) _a : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _a : Optional[Any] = model(_a , attention_mask=_a )['''last_hidden_state'''] _a : int = model(_a , attention_mask=_a , past_key_values=_a )[ '''last_hidden_state''' ] # select random slice _a : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : str = output_from_no_past[:, -3:, random_slice_idx].detach() _a : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) ) def __lowercase ( self , _a , _a , _a , _a , _a , *_a , _a=False ) -> Any: _a : List[Any] = BioGptForCausalLM(_a ) model.to(_a ) if gradient_checkpointing: model.gradient_checkpointing_enable() _a : Dict = model(_a , labels=_a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def __lowercase ( self , _a , *_a ) -> str: _a : Union[str, Any] = BioGptModel(_a ) _a : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: 
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Union[str, Any]: _a : Optional[Any] = self.num_labels _a : str = BioGptForTokenClassification(_a ) model.to(_a ) model.eval() _a : int = model(_a , attention_mask=_a , token_type_ids=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowercase ( self ) -> List[str]: _a : List[Any] = self.prepare_config_and_inputs() ( _a ) : Union[str, Any] = config_and_inputs _a : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : int = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) UpperCAmelCase__ : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : int = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[Any] = False def __lowercase ( self ) -> str: _a : str = BioGptModelTester(self ) _a : Any = ConfigTester(self , config_class=_a , hidden_size=3_7 ) def __lowercase ( self ) -> List[str]: self.config_tester.run_common_tests() def __lowercase ( self ) -> Any: _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __lowercase ( self ) -> Any: _a : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _a : Any = type self.model_tester.create_and_check_model(*_a ) def __lowercase ( self ) -> List[Any]: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_a ) def __lowercase ( self ) -> Optional[Any]: _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_a , gradient_checkpointing=_a ) def __lowercase ( self ) -> Optional[Any]: _a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_a ) def __lowercase ( self ) -> List[str]: _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_a ) def __lowercase ( self ) -> Optional[int]: _a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_a ) @slow def __lowercase ( self ) -> List[Any]: _a : List[str] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(_a ) _a : Tuple = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) _a : Optional[Any] = '''left''' # Define PAD Token = EOS Token = 50256 _a : Dict = tokenizer.eos_token _a : Tuple = model.config.eos_token_id # use different length sentences to test batching _a : str = [ '''Hello, my dog is a little''', '''Today, I''', ] _a : Tuple = tokenizer(_a , return_tensors='''pt''' , padding=_a ) _a : Optional[int] = inputs['''input_ids'''].to(_a ) _a : int = model.generate( 
            input_ids=inputs["input_ids"].to(torch_device),
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(_a, skip_special_tokens=True)  # `_a` holds the batched generate() output bound above
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
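# NOTE: hedged illustration, not part of the test file above. The padded-vs-non-padded
# comparison in the batch generation test works because the attention mask zeroes out pad
# tokens and `max_length` is shortened by the pad count, so both calls decode the same
# number of new tokens. A minimal sketch of that idea (the checkpoint name comes from the
# tests above; `padding_side` handling and exact outputs may vary across transformers
# versions):
from transformers import BioGptForCausalLM, BioGptTokenizer


def padding_aware_generation_sketch():
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    tokenizer.padding_side = "left"  # decoder-only models are usually left-padded for generation
    batch = tokenizer(["Hello, my dog is", "Today, I"], return_tensors="pt", padding=True)
    num_paddings = int((batch["attention_mask"][-1] == 0).sum())
    outputs = model.generate(
        input_ids=batch["input_ids"],
        attention_mask=batch["attention_mask"],
        max_length=model.config.max_length - num_paddings,
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)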
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the HF model to the module/parameter the mapped key points at
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq Hubert checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
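# NOTE: hedged usage sketch, not part of the conversion script itself. The checkpoint path
# and output directory are placeholders; a local fairseq Hubert checkpoint (and fairseq
# installed) is assumed. A pretraining checkpoint (is_finetuned=False) converts to a bare
# HubertModel; a fine-tuned one would additionally need --dict_path and yields HubertForCTC.
from pathlib import Path


def convert_example():
    dump_dir = Path("./hubert-base-converted")  # placeholder output directory
    dump_dir.mkdir(exist_ok=True)
    convert_hubert_checkpoint(
        checkpoint_path="./hubert_base_ls960.pt",  # placeholder fairseq checkpoint
        pytorch_dump_folder_path=str(dump_dir),
        config_path=None,   # fall back to the default HubertConfig
        dict_path=None,
        is_finetuned=False,
    )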
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
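# NOTE: hedged illustration of what the _LazyModule pattern above buys, not part of the
# wavlm package itself. The module object is swapped for a _LazyModule at import time, so
# submodules are only imported when an attribute is first touched and `import transformers`
# stays cheap even with hundreds of model files:
import transformers

config = transformers.WavLMConfig()  # first attribute access triggers the real import of configuration_wavlm
print(type(config).__module__)       # -> transformers.models.wavlm.configuration_wavlm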
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    """
    Wraps a ViLT image processor and a BERT tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
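# NOTE: hedged usage sketch, separate from the processor definition above. It assumes the
# public `dandelin/vilt-b32-finetuned-vqa` checkpoint, network access, and Pillow/requests
# installed. The processor simply merges the tokenizer's text features with the image
# processor's pixel features into one BatchEncoding:
import requests
from PIL import Image

from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, input_ids, pixel_mask, pixel_values, token_type_ids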
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss a__ = pytest.mark.integration @require_faiss class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: _a : Tuple = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(_a ) for x in np.arange(3_0 ).tolist()]} ) return dset def __lowercase ( self ) -> List[Any]: import faiss _a : Dataset = self._create_dummy_dataset() _a : Union[str, Any] = dset.map( lambda _a , _a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_a , keep_in_memory=_a ) _a : List[str] = dset.add_faiss_index('''vecs''' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT ) _a : Optional[Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) dset.drop_index('''vecs''' ) def __lowercase ( self ) -> Optional[int]: import faiss _a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) _a : Union[str, Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __lowercase ( self ) -> str: import faiss _a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_a ) as tmp_file: dset.save_faiss_index('''vecs''' , tmp_file.name ) dset.load_faiss_index('''vecs2''' , tmp_file.name ) os.unlink(tmp_file.name ) _a : str = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __lowercase ( self ) -> Optional[int]: _a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='''vecs''' ) dset.drop_index('''vecs''' ) self.assertRaises(_a , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) ) def __lowercase ( self ) -> Any: from elasticsearch import Elasticsearch _a : Dataset = self._create_dummy_dataset() with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: _a : Dict = {'''acknowledged''': True} mocked_bulk.return_value([(True, None)] * 3_0 ) _a : Tuple = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 2_9}]}} _a : Optional[int] = Elasticsearch() dset.add_elasticsearch_index('''filename''' , es_client=_a ) _a : Optional[Any] = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) @require_faiss class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> Tuple: import faiss _a : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 1_0 ) # single query _a : Optional[int] = np.zeros(5 , dtype=np.floataa ) _a : Optional[int] = 1 _a : int = index.search(_a ) self.assertRaises(_a , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries _a : Dict = np.eye(5 , dtype=np.floataa )[::-1] _a : Any = index.search_batch(_a ) self.assertRaises(_a , index.search_batch , queries[0] ) _a : Dict = [scores[0] for scores in total_scores] _a : Any = [indices[0] for indices in total_indices] self.assertGreater(np.min(_a ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , _a ) def __lowercase ( self ) -> str: import faiss _a : int = FaissIndex(string_factory='''Flat''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) _a : Optional[Any] = FaissIndex(string_factory='''LSH''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(_a ): _a : List[str] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) ) def __lowercase ( self ) -> str: import faiss _a : int = faiss.IndexFlat(5 ) _a : str = FaissIndex(custom_index=_a ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowercase ( self ) -> str: import faiss _a : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting 
delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=_a ) as tmp_file: index.save(tmp_file.name ) _a : Union[str, Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) _a : List[Any] = np.zeros(5 , dtype=np.floataa ) _a : int = 1 _a : Union[str, Any] = index.search(_a ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def __UpperCAmelCase ( __a : Optional[int] ) -> List[Any]: """simple docstring""" import faiss _a : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) _a : str = '''index.faiss''' _a : int = F"""mock://{index_name}""" index.save(__a ,storage_options=mockfs.storage_options ) _a : int = FaissIndex.load(__a ,storage_options=mockfs.storage_options ) _a : Tuple = np.zeros(5 ,dtype=np.floataa ) _a : str = 1 _a : Union[str, Any] = index.search(__a ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> List[str]: from elasticsearch import Elasticsearch with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: _a : int = Elasticsearch() _a : List[Any] = {'''acknowledged''': True} _a : List[str] = ElasticSearchIndex(es_client=_a ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['''foo''', '''bar''', '''foobar'''] ) # single query _a : str = '''foo''' _a : List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} _a : Optional[Any] = index.search(_a ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout _a : Any = '''foo''' _a : List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} _a : Optional[int] = index.search(_a , request_timeout=3_0 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries _a : str = ['''foo''', '''bar''', '''foobar'''] _a : Optional[int] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} _a : Union[str, Any] = index.search_batch(_a ) _a : Union[str, Any] = [scores[0] for scores in total_scores] _a : Optional[int] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_a ) , 0 ) self.assertListEqual([1, 1, 1] , _a ) # batched queries with timeout _a : Tuple = ['''foo''', '''bar''', '''foobar'''] _a : str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} _a : int = index.search_batch(_a , request_timeout=3_0 ) _a : Tuple = [scores[0] for scores in total_scores] _a : Optional[int] = [indices[0] for indices in total_indices] self.assertGreater(np.min(_a ) , 0 ) self.assertListEqual([1, 1, 1] , _a )
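# NOTE: hedged usage sketch, separate from the test suite above. The same add/search API the
# tests exercise can be used directly on a `datasets.Dataset` for nearest-neighbour lookup.
# It assumes `faiss` is installed; the random vectors are a stand-in for real embeddings:
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"text": ["foo", "bar", "foobar"]})
ds = ds.map(lambda ex: {"vecs": np.random.rand(5).astype(np.float32)})  # placeholder embeddings
ds.add_faiss_index(column="vecs")
scores, examples = ds.get_nearest_examples("vecs", np.random.rand(5).astype(np.float32), k=2)
print(examples["text"])  # the two nearest rows under the default L2 metric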
from math import ceil


def solution(n: int = 1_001) -> int:
    """Returns the sum of the numbers on the diagonals of an n x n spiral grid (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
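# NOTE: illustrative check for the solution above, not part of the original file; it assumes
# `solution` from that file is in scope. For a 5 x 5 spiral the diagonal values are
# 1, 3, 5, 7, 9, 13, 17, 21, 25: ring i has side length 2i + 1 and its four corners sum to
# 4 * (2i + 1)**2 - 12i = 4 * odd**2 - 6 * even, which is exactly the loop body.
assert solution(3) == 1 + (3 + 5 + 7 + 9) == 25
assert solution(5) == 25 + (13 + 17 + 21 + 25) == 101
assert solution(1_001) == 669171001  # the published Project Euler 28 answer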
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( __lowercase ): """simple docstring""" def __lowercase ( self ) -> Tuple: _a : Tuple = tempfile.mkdtemp() _a : List[str] = 5 # Realm tok _a : Optional[int] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _a : Optional[int] = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(_a , exist_ok=_a ) _a : str = os.path.join(_a , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : List[str] = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(_a , exist_ok=_a ) def __lowercase ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def __lowercase ( self ) -> str: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> List[str]: _a : int = RealmConfig(num_block_records=self.num_block_records ) return config def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def __lowercase ( self ) -> Dict: _a : Any = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=_a , ) return block_records def __lowercase ( self ) -> Optional[int]: _a : List[str] = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __lowercase ( self ) -> Optional[int]: _a : Dict = self.get_config() _a : Tuple = self.get_dummy_retriever() _a : Dict = retriever.tokenizer _a : Union[str, Any] = np.array([0, 3] , dtype='''long''' ) _a : Any = tokenizer(['''Test question'''] ).input_ids _a : int = tokenizer( ['''the fourth'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids _a : str = config.reader_seq_len _a : Any = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( 
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def __lowercase ( self ) -> Union[str, Any]: _a : Optional[int] = self.get_config() _a : Union[str, Any] = self.get_dummy_retriever() _a : Tuple = retriever.tokenizer _a : List[str] = np.array([0, 3, 5] , dtype='''long''' ) _a : int = tokenizer(['''Test question'''] ).input_ids _a : List[str] = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids _a : Union[str, Any] = config.reader_seq_len _a : List[Any] = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='''np''' ) self.assertEqual([False, True, True] , _a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a ) def __lowercase ( self ) -> int: _a : Any = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path _a : str = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: _a : Tuple = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) _a : str = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]: """simple docstring""" return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]: """simple docstring""" _a : str = to_pil_image(__a ) _a , _a : Optional[Any] = pil_image.size _a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a ) _a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates _a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()] _a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] _a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] _a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format _a : int = [] for x, y, w, h in zip(__a ,__a ,__a ,__a ): _a : List[str] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes _a : Dict = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a ,__a ,__a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None: super().__init__(**_a ) _a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _a : Union[str, Any] = get_size_dict(_a ) _a : int = do_resize _a : Optional[int] = size _a : str = resample _a : str = do_rescale _a : Any = rescale_value _a : Optional[Any] = do_normalize _a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD _a : List[Any] = apply_ocr _a : Optional[int] = ocr_lang _a : Tuple = tesseract_config def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray: _a : Any = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[int] = (size['''height'''], size['''width''']) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : Any = get_size_dict(_a ) _a : List[str] = resample if resample is not None else self.resample _a : int = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : int = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr _a : int = ocr_lang if ocr_lang is not None else self.ocr_lang _a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. _a : Any = [to_numpy_array(_a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) _a : str = [] _a : str = [] for image in images: _a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a ) words_batch.append(_a ) boxes_batch.append(_a ) if do_resize: _a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images] _a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a ) if apply_ocr: _a : Optional[int] = words_batch _a : List[Any] = boxes_batch return data
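# NOTE: hedged illustration of the box normalization used by apply_tesseract above, not part
# of the image processor. Tesseract returns pixel (left, top, width, height); the code first
# converts to (x0, y0, x1, y1) corners and then rescales every coordinate to the 0-1000 range
# that LayoutLM-style models expect. The numbers below are made up to make the arithmetic visible:
def normalize_box_example():
    width, height = 640, 480                # image size in pixels
    left, top, w, h = 64, 48, 128, 96       # a fake OCR word box, in pixels
    box = [left, top, left + w, top + h]    # -> [64, 48, 192, 144]
    normalized = [
        int(1_000 * (box[0] / width)),      # 64 / 640  -> 100
        int(1_000 * (box[1] / height)),     # 48 / 480  -> 100
        int(1_000 * (box[2] / width)),      # 192 / 640 -> 300
        int(1_000 * (box[3] / height)),     # 144 / 480 -> 300
    ]
    assert normalized == [100, 100, 300, 300]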
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> Union[str, Any]: _a : Optional[Any] = tempfile.mkdtemp() # fmt: off _a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _a : Any = { '''do_resize''': True, '''size''': {'''height''': 1_8, '''width''': 1_8}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _a : str = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __lowercase ( self , **_a ) -> Any: return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self , **_a ) -> str: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __lowercase ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def __lowercase ( self ) -> Any: _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowercase ( self ) -> str: _a : List[str] = self.get_tokenizer() _a : Tuple = self.get_image_processor() _a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) processor.save_pretrained(self.tmpdirname ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Dict: _a : List[str] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) _a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __lowercase ( self ) -> Any: _a : Dict = self.get_image_processor() _a : 
str = self.get_tokenizer() _a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[str] = self.prepare_image_inputs() _a : List[Any] = image_processor(_a , return_tensors='''np''' ) _a : Dict = processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.get_image_processor() _a : Dict = self.get_tokenizer() _a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Tuple = '''lower newer''' _a : int = processor(text=_a ) _a : str = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowercase ( self ) -> List[Any]: _a : Any = self.get_image_processor() _a : str = self.get_tokenizer() _a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : List[Any] = '''lower newer''' _a : Union[str, Any] = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_a ): processor() def __lowercase ( self ) -> Optional[int]: _a : Union[str, Any] = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : int = processor.batch_decode(_a ) _a : int = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __lowercase ( self ) -> List[Any]: _a : Tuple = self.get_image_processor() _a : List[str] = self.get_tokenizer() _a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a ) _a : Optional[int] = '''lower newer''' _a : Dict = self.prepare_image_inputs() _a : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
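# NOTE: hedged illustration of the registration pattern above, not part of accelerate.
# Each *_command_parser helper adds a subparser and sets a `func` default via
# parser.set_defaults, which is why main() can dispatch on `args.func`. A hypothetical
# new subcommand would follow the same shape:
def hello_command_parser(subparsers=None):
    parser = subparsers.add_parser("hello", help="toy example subcommand")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser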