Dataset schema:
- code: string (length 87 to 55.2k)
- code_codestyle: int64 (0 to 349)
- style_context: string (length 135 to 49.1k)
- style_context_codestyle: int64 (0 to 349)
- label: int64 (0 to 1)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE : Union[str, Any] = { "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"], "tokenization_electra": ["ElectraTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Tuple = ["ElectraTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = [ "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "ElectraForCausalLM", "ElectraForMaskedLM", "ElectraForMultipleChoice", "ElectraForPreTraining", "ElectraForQuestionAnswering", "ElectraForSequenceClassification", "ElectraForTokenClassification", "ElectraModel", "ElectraPreTrainedModel", "load_tf_weights_in_electra", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFElectraForMaskedLM", "TFElectraForMultipleChoice", "TFElectraForPreTraining", "TFElectraForQuestionAnswering", "TFElectraForSequenceClassification", "TFElectraForTokenClassification", "TFElectraModel", "TFElectraPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = [ "FlaxElectraForCausalLM", "FlaxElectraForMaskedLM", "FlaxElectraForMultipleChoice", "FlaxElectraForPreTraining", "FlaxElectraForQuestionAnswering", "FlaxElectraForSequenceClassification", "FlaxElectraForTokenClassification", "FlaxElectraModel", "FlaxElectraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, 
FlaxElectraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
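Both files in this row follow the standard Transformers lazy-import layout: a module-level _import_structure dict, try/except guards around optional backends, and a _LazyModule installed at the bottom. A minimal sketch of that pattern, with hypothetical module and symbol names ("foo", FooConfig, FooModel are placeholders, not from the dataset):

from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_foo": ["FooConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch backend missing: skip registering the modeling symbols
else:
    _import_structure["modeling_foo"] = ["FooModel"]

if TYPE_CHECKING:
    from .configuration_foo import FooConfig
else:
    import sys

    # Replace this module with a lazy proxy; submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)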
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''') class A_ ( Generic[T] ): def __init__( self : List[Any] , snake_case_ : list[T] , snake_case_ : Callable[[T, T], T] ): _UpperCAmelCase = None _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = [any_type for _ in range(self.N )] + arr _UpperCAmelCase = fnc self.build() def lowercase ( self : List[Any] ): for p in range(self.N - 1 , 0 , -1 ): _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : T ): p += self.N _UpperCAmelCase = v while p > 1: _UpperCAmelCase = p // 2 _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Any , snake_case_ : int , snake_case_ : int ): # noqa: E741 _UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N _UpperCAmelCase = None while l <= r: if l % 2 == 1: _UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] ) if r % 2 == 0: _UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] ) _UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __SCREAMING_SNAKE_CASE :List[str] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b) def UpperCAmelCase_ ( ) -> None: '''simple docstring''' for i in range(len(__lowercase ) ): for j in range(__lowercase , len(__lowercase ) ): _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__lowercase , __lowercase ) assert max_range == max_segment_tree.query(__lowercase , __lowercase ) assert sum_range == sum_segment_tree.query(__lowercase , __lowercase ) test_all_segments() for index, value in test_updates.items(): __SCREAMING_SNAKE_CASE :str = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
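For readability, a de-obfuscated sketch of the iterative segment tree in this row's code column (identifier names are the editor's choice; the behavior is unchanged):

from typing import Callable, Generic, List, TypeVar

T = TypeVar("T")

class SegmentTree(Generic[T]):
    def __init__(self, arr: List[T], fn: Callable[[T, T], T]) -> None:
        self.n = len(arr)
        self.fn = fn
        self.st: List[T] = [None] * self.n + list(arr)  # leaves live at indices [n, 2n)
        for p in range(self.n - 1, 0, -1):  # build parents bottom-up
            self.st[p] = fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.n
        self.st[p] = v
        while p > 1:  # recompute ancestors up to the root
            p //= 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T:
        """Fold fn over the inclusive range arr[l..r]."""
        l, r = l + self.n, r + self.n
        res = None
        while l <= r:
            if l % 2 == 1:  # l is a right child: consume it, its parent covers extra elements
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:  # r is a left child: consume it for the same reason
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res

# e.g. SegmentTree([1, 10, -2, 9], min).query(0, 3) returns -2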
'''simple docstring''' from __future__ import annotations def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance < 0: raise ValueError('''Resistance cannot be negative''' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
23
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
0
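A cleaned-up sketch of the Catalan-number routine in this row's style_context column; the recurrence is C(0) = C(1) = 1 and C(i) = sum over j < i of C(j) * C(i - j - 1):

def catalan_numbers(upper_limit: int) -> list[int]:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be >= 0")
    catalan = [0] * (upper_limit + 1)
    catalan[0] = 1  # base cases: C(0) = C(1) = 1
    if upper_limit > 0:
        catalan[1] = 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan[i] += catalan[j] * catalan[i - j - 1]
    return catalan

# catalan_numbers(5) == [1, 1, 2, 5, 14, 42]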
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast snake_case_ = datasets.utils.logging.get_logger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ): A_ : int = 10_000 A_ : Optional[List[str]] = None A_ : Optional[datasets.Features] = None class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ): A_ : Optional[Any] = ParquetConfig def a (self : Optional[int] ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def a (self : List[str] , a__ : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __snake_case = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a__ , (str, list, tuple) ): __snake_case = data_files if isinstance(a__ , a__ ): __snake_case = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case = [dl_manager.iter_files(a__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __snake_case = [] for split_name, files in data_files.items(): if isinstance(a__ , a__ ): __snake_case = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __snake_case = [dl_manager.iter_files(a__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(a__ ): with open(a__ , '''rb''' ) as f: __snake_case = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) ) break splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={'''files''': files} ) ) return splits def a (self : str , a__ : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __snake_case = table_cast(a__ , self.info.features.arrow_schema ) return pa_table def a (self : Any , a__ : Dict ): """simple docstring""" __snake_case = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ): with open(a__ , '''rb''' ) as f: __snake_case = pq.ParquetFile(a__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __snake_case = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(a__ ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a__ )}: {e}""" ) raise
24
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
14
0
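The Parquet builder in this row's code column streams record batches instead of loading whole files into memory. A minimal sketch of that read loop (the file path and batch size below are hypothetical placeholders):

import pyarrow as pa
import pyarrow.parquet as pq

parquet_file = pq.ParquetFile("data.parquet")  # hypothetical path
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
    table = pa.Table.from_batches([record_batch])  # one pa.Table per yielded batch
    # ... the builder then casts the table to its features and yields (key, table)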
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=33 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : str = batch_size SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length SCREAMING_SNAKE_CASE__ : List[str] = is_training SCREAMING_SNAKE_CASE__ : Dict = use_input_mask SCREAMING_SNAKE_CASE__ : int = use_token_type_ids SCREAMING_SNAKE_CASE__ : Dict = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : Dict = hidden_size SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE__ : str = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Dict = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE__ : List[str] = num_choices SCREAMING_SNAKE_CASE__ : Tuple = scope def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ (self ) -> List[str]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = EsmModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = False __UpperCamelCase : int = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase : Optional[int] = () __UpperCamelCase : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if 
is_torch_available() else {} ) __UpperCamelCase : Any = True def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = EsmModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __magic_name__ (self ) -> str: """simple docstring""" self.config_tester.run_common_tests() def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ : Optional[Any] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __magic_name__ (self ) -> List[Any]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Tuple = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()[0] SCREAMING_SNAKE_CASE__ : Dict = EsmEmbeddings(config=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) SCREAMING_SNAKE_CASE__ : List[Any] = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE__ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ) def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()[0] SCREAMING_SNAKE_CASE__ : Any = EsmEmbeddings(config=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.empty(2 , 4 , 30 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] SCREAMING_SNAKE_CASE__ : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE__ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def __magic_name__ (self ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for 
common tests.""" ) def __magic_name__ (self ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase_ (a__ ): """simple docstring""" @slow def __magic_name__ (self ) -> Any: """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(SCREAMING_SNAKE_CASE__ )[0] SCREAMING_SNAKE_CASE__ : Tuple = 33 SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) @slow def __magic_name__ (self ) -> List[str]: """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ )[0] # compare the actual values for a slice. SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
25
_lowerCamelCase : Optional[int] = 65521 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = 1 A__ = 0 for plain_chr in plain_text: A__ = (a + ord(lowercase_ )) % MOD_ADLER A__ = (b + a) % MOD_ADLER return (b << 16) | a
14
0
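For reference, a de-obfuscated version of the Adler-32 checksum in this row's style_context column; for bytes input, the standard library's zlib.adler32 computes the same value:

MOD_ADLER = 65521  # largest prime below 2**16

def adler32(plain_text: str) -> int:
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % MOD_ADLER  # running sum of character values, seeded with 1
        b = (b + a) % MOD_ADLER        # running sum of the successive a values
    return (b << 16) | a               # b in the high 16 bits, a in the low 16

# adler32("Wikipedia") == 300286872 == zlib.adler32(b"Wikipedia")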
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device("cpu") def lowerCAmelCase_ ( ): _A : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _A : Optional[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ) return im def lowerCAmelCase_ ( snake_case_ ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : str = dct.pop(snake_case_ ) _A : Optional[Any] = val def lowerCAmelCase_ ( snake_case_ ): _A : int = [] for k in state_dict.keys(): _A : Optional[Any] = k if ".pwconv" in k: _A : int = k_new.replace(""".pwconv""",""".point_wise_conv""" ) if ".dwconv" in k: _A : Dict = k_new.replace(""".dwconv""",""".depth_wise_conv""" ) if ".Proj." in k: _A : str = k_new.replace(""".Proj.""",""".proj.""" ) if "patch_embed" in k_new: _A : Optional[Any] = k_new.replace("""patch_embed""","""swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _A : Any = k_new.split(""".""" ) if ls[2].isdigit(): _A : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _A : Optional[Any] = k_new.replace("""network""","""swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _A : str = 1000 _A : List[str] = """huggingface/label-files""" _A : Union[str, Any] = """imagenet-1k-id2label.json""" _A : Tuple = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) ) _A : int = {int(snake_case_ ): v for k, v in idalabel.items()} _A : Any = idalabel _A : List[str] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _A : List[Any] = [3, 3, 6, 4] _A : List[Any] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _A : Dict = [3, 3, 9, 6] _A : Union[str, Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _A : Optional[Any] = [4, 3, 10, 5] _A : Optional[Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _A : Tuple = [4, 4, 12, 6] _A : List[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _A : Optional[Any] = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ ) else: _A : List[Any] = torch.load(snake_case_,map_location="""cpu""" ) _A : Dict = checkpoint _A : Dict = create_rename_keys(snake_case_ ) for rename_key_src, rename_key_dest in rename_keys: 
rename_key(snake_case_,snake_case_,snake_case_ ) # load HuggingFace model _A : str = SwiftFormerForImageClassification(snake_case_ ).eval() hf_model.load_state_dict(snake_case_ ) # prepare test inputs _A : Any = prepare_img() _A : Optional[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _A : Any = processor(images=snake_case_,return_tensors="""pt""" ) # compare outputs from both models _A : Union[str, Any] = get_expected_output(snake_case_ ) _A : List[Any] = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5],snake_case_,atol=1e-3 ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
26
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _lowerCamelCase : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _lowerCamelCase : Tuple = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple 
docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRContextEncoderTokenizer class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRQuestionEncoderTokenizer _lowerCamelCase : int = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _lowerCamelCase : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ : '''simple docstring''' def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) elif titles is None or texts is None: A__ = titles if texts is None else texts return super().__call__( UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles] A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts] A__ = len(UpperCAmelCase__) A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages assert len(UpperCAmelCase__) == len( UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""" A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for 
encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__) ] } if return_attention_mask is not False: A__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) A__ = attention_mask return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = reader_input['''input_ids'''] A__ , A__ , A__ = reader_output[:3] A__ = len(UpperCAmelCase__) A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__) A__ = [] for doc_id in sorted_docs: A__ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A__ = sequence_ids.index(self.pad_token_id) else: A__ = len(UpperCAmelCase__) A__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = [] for start_index, start_score in enumerate(UpperCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__) A__ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A__ = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = DPRReaderTokenizer
14
0
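The DPR reader tokenizer in this row's style_context column ranks candidate answer spans by start_logit + end_logit and greedily keeps the best non-overlapping ones. A minimal sketch of that selection step (function and variable names are the editor's, not the dataset's):

def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for s, s_score in enumerate(start_logits):
        # only consider spans up to max_answer_length tokens long
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        # skip spans nested inside (or containing) an already chosen span
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen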
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=False , __a=False , __a=False , __a=2 , __a=99 , __a=0 , __a=32 , __a=5 , __a=4 , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=2 , __a=4 , __a="last" , __a=True , __a=None , __a=0 , ): '''simple docstring''' __a : List[str] = parent __a : int = batch_size __a : str = seq_length __a : Optional[int] = is_training __a : Any = use_input_lengths __a : Optional[int] = use_token_type_ids __a : int = use_labels __a : Dict = gelu_activation __a : int = sinusoidal_embeddings __a : str = causal __a : Union[str, Any] = asm __a : Optional[Any] = n_langs __a : Union[str, Any] = vocab_size __a : List[str] = n_special __a : Tuple = hidden_size __a : Any = num_hidden_layers __a : Dict = num_attention_heads __a : Tuple = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Tuple = max_position_embeddings __a : Optional[Any] = type_sequence_label_size __a : List[Any] = initializer_range __a : Tuple = num_labels __a : List[str] = num_choices __a : str = summary_type __a : int = use_proj __a : Dict = scope __a : Optional[int] = bos_token_id def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Tuple = None if self.use_input_lengths: __a : List[str] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __a : Optional[Any] = None if self.use_token_type_ids: __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __a : Optional[Any] = None __a : Optional[int] = None __a : List[Any] = None if self.use_labels: __a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[Any] = ids_tensor([self.batch_size] , 2 ).float() __a : Any = ids_tensor([self.batch_size] , self.num_choices ) __a : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __UpperCAmelCase ( self ): '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
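A minimal usage sketch for the class above; the BERT sub-configs are illustrative assumptions, not part of this file:

from transformers import BertConfig, EncoderDecoderConfig

# hypothetical example: compose an encoder-decoder config from two BERT configs
encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention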
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a '0b'-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
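Two hand-checked examples for binary_and (the expected values are computed by hand, not taken from the file):

# 25 = 0b11001 and 32 = 0b100000 share no set bits
assert binary_and(25, 32) == "0b000000"
# 5 = 0b101 and 3 = 0b011 share only the lowest bit
assert binary_and(5, 3) == "0b001"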
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # every vertex is a path of length 1 by itself

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
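A hand-derived sanity check (assuming distances count vertices, matching the initialization to 1 above): for the graph above the longest chain is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5. A smaller illustrative case:

longest_distance({0: [1], 1: [2], 2: []})  # prints 3, for the chain 0 -> 1 -> 2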
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __SCREAMING_SNAKE_CASE : Tuple = random.Random() def UpperCamelCase_ ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[str]=None ) -> str: """simple docstring""" if rng is None: _UpperCAmelCase : List[Any] = global_rng _UpperCAmelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def __init__( self : int , A : str , A : Dict=7 , A : List[Any]=400 , A : Union[str, Any]=2000 , A : str=24 , A : Optional[Any]=24 , A : Optional[Any]=0.0 , A : Optional[int]=16000 , A : str=True , A : Optional[Any]=True , ): _UpperCAmelCase : Optional[int] = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Tuple = min_seq_length _UpperCAmelCase : List[str] = max_seq_length _UpperCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase : Dict = feature_size _UpperCAmelCase : List[Any] = num_mel_bins _UpperCAmelCase : Union[str, Any] = padding_value _UpperCAmelCase : Optional[Any] = sampling_rate _UpperCAmelCase : Optional[int] = return_attention_mask _UpperCAmelCase : Tuple = do_normalize def _A ( self : Union[str, Any] ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _A ( self : str , A : Tuple=False , A : Any=False ): def _flatten(A : Optional[Any] ): return list(itertools.chain(*A ) ) if equal_length: _UpperCAmelCase : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase : Optional[int] = [np.asarray(A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase_ (snake_case__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase: int = SpeechaTextFeatureExtractor if is_speech_available() else None def _A ( self : Any ): _UpperCAmelCase : Optional[Any] = SpeechaTextFeatureExtractionTester(self ) def _A ( self : Optional[Any] , A : Any ): self.assertTrue(np.all(np.mean(A , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1E-3 ) ) def _A ( self : Any ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : int = [np.asarray(A ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase : Tuple = feature_extractor(A , padding=A , return_tensors="np" ).input_features 
self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(A , A , atol=1E-3 ) ) # Test batched _UpperCAmelCase : List[str] = feature_extractor(A , return_tensors="np" ).input_features _UpperCAmelCase : List[str] = feature_extractor(A , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. _UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase : Tuple = np.asarray(A ) _UpperCAmelCase : Union[str, Any] = feature_extractor(A , return_tensors="np" ).input_features _UpperCAmelCase : Any = feature_extractor(A , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(A , A ): self.assertTrue(np.allclose(A , A , atol=1E-3 ) ) def _A ( self : List[Any] ): _UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : Tuple = ["longest", "max_length", "do_not_pad"] _UpperCAmelCase : int = [None, 16, None] for max_length, padding in zip(A , A ): _UpperCAmelCase : str = feature_extractor( A , padding=A , max_length=A , return_attention_mask=A ) _UpperCAmelCase : int = inputs.input_features _UpperCAmelCase : Any = inputs.attention_mask _UpperCAmelCase : Dict = [np.sum(A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _A ( self : Union[str, Any] ): _UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : Optional[int] = ["longest", "max_length", "do_not_pad"] _UpperCAmelCase : Optional[int] = [None, 16, None] for max_length, padding in zip(A , A ): _UpperCAmelCase : List[Any] = feature_extractor( A , max_length=A , padding=A , return_tensors="np" , return_attention_mask=A ) _UpperCAmelCase : Dict = inputs.input_features _UpperCAmelCase : Dict = inputs.attention_mask _UpperCAmelCase : str = [np.sum(A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def _A ( self : Dict ): _UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : Optional[int] = feature_extractor( A , padding="max_length" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , ) _UpperCAmelCase : List[Any] = inputs.input_features _UpperCAmelCase : List[Any] = inputs.attention_mask 
_UpperCAmelCase : List[str] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def _A ( self : Optional[int] ): _UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : Optional[int] = feature_extractor( A , padding="longest" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , ) _UpperCAmelCase : Dict = inputs.input_features _UpperCAmelCase : Tuple = inputs.attention_mask _UpperCAmelCase : List[Any] = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) _UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCAmelCase : Dict = feature_extractor( A , padding="longest" , max_length=16 , truncation=A , return_tensors="np" , return_attention_mask=A , ) _UpperCAmelCase : int = inputs.input_features _UpperCAmelCase : Tuple = inputs.attention_mask _UpperCAmelCase : Any = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def _A ( self : Optional[int] ): import torch _UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : str = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase : Dict = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase : Tuple = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase : List[str] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def _A ( self : Dict , A : List[str] ): from datasets import load_dataset _UpperCAmelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase : Dict = ds.sort("id" ).select(range(A ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _A ( self : str ): # fmt: off _UpperCAmelCase : str = np.array([ -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241, -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128, -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625, ] ) # fmt: on _UpperCAmelCase : int = self._load_datasamples(1 ) _UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase : Optional[int] = feature_extractor(A , return_tensors="pt" ).input_features self.assertEquals(input_features.shape , (1, 584, 
24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , A , atol=1E-4 ) )
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
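A hedged usage sketch for the exported text-to-video pipeline; the checkpoint id and generation settings are illustrative assumptions, not part of this file:

# hypothetical example; requires a GPU and downloads model weights from the Hub
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames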
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between the two endpoints of two vectors, with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between the two endpoints of two vectors, without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
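A quick agreement check between the two implementations (the expected value is computed by hand, not taken from the file):

# 3-4-5 right triangle: both implementations should return 5.0
assert abs(euclidean_distance((0, 0), (3, 4)) - 5.0) < 1e-9
assert abs(euclidean_distance_no_np((0, 0), (3, 4)) - 5.0) < 1e-9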
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def lowercase ( __snake_case : str ): def decorator(__snake_case : int ): lowercase_ : int = getattr(__snake_case , '''handle_key''' , [] ) handle += [key] setattr(__snake_case , '''handle_key''' , __snake_case ) return func return decorator def lowercase ( *__snake_case : List[str] ): def decorator(__snake_case : Dict ): lowercase_ : int = getattr(__snake_case , '''handle_key''' , [] ) handle += keys setattr(__snake_case , '''handle_key''' , __snake_case ) return func return decorator class _UpperCAmelCase ( _A ): def __new__( cls : List[str] , A : Tuple , A : str , A : Union[str, Any] ) -> str: lowercase_ : Optional[Any] = super().__new__(cls , A , A , A ) if not hasattr(A , '''key_handler''' ): setattr(A , '''key_handler''' , {} ) setattr(A , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase_ : Dict = getattr(A , '''handle_key''' , [] ) for key in handled_keys: lowercase_ : int = value return new_cls @staticmethod def A ( cls : Tuple ) -> Optional[Any]: lowercase_ : Dict = get_character() if char != KEYMAP["undefined"]: lowercase_ : str = ord(A ) lowercase_ : Any = cls.key_handler.get(A ) if handler: lowercase_ : Optional[int] = char return handler(cls ) else: return None def lowercase ( cls : List[str] ): return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily widen the feature size so padded mel targets keep all bins
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
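# Usage sketch (requires network access to pull the checkpoint; the model id is
# the standard SpeechT5 TTS checkpoint). `raw_speech` stands in for a 1-D float
# waveform at 16 kHz; the silent placeholder is only for illustration.
if __name__ == "__main__":
    import numpy as np

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    raw_speech = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of silence
    inputs = processor(text="Hello world", audio_target=raw_speech, sampling_rate=16000, return_tensors="pt")
    # tokenizer fields plus spectrogram targets, e.g.
    # ['attention_mask', 'decoder_attention_mask', 'input_ids', 'labels']
    print(sorted(inputs.keys()))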
14
0
"""Word Error Rate (WER) metric."""
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
34
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
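# Minimal sketch (illustrative small values): building a config offline and
# round-tripping it through `to_dict`; no pretrained weights are touched.
if __name__ == "__main__":
    config = GitConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=2)
    serialized = config.to_dict()
    assert serialized["model_type"] == "git"
    assert serialized["vision_config"]["model_type"] == "git_vision_model"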
14
0
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File to save the evaluation results to."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200000, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Target vocabulary size of the trained tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing the dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
35
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation number scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
14
0
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
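# Usage sketch: `KeyDataset` is the helper most often used directly, to stream
# one column of a dataset into a pipeline. A plain list of dicts keeps this
# self-contained; any indexable dataset with dict rows works the same way.
if __name__ == "__main__":
    rows = [{"text": "hello"}, {"text": "world"}]
    ds = KeyDataset(rows, "text")
    print(len(ds), ds[0], ds[1])  # 2 hello world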
36
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
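# Example invocation (the script filename and local paths are placeholders; the
# base model id is the standard Stable Diffusion 1.5 repo):
#   python convert_lora_safetensor_to_diffusers.py \
#     --base_model_path runwayml/stable-diffusion-v1-5 \
#     --checkpoint_path ./lora_weights.safetensors \
#     --dump_path ./merged-pipeline \
#     --alpha 0.75 --device cpu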
14
0
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
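# Typical invocations (this module backs the `accelerate env` subcommand; the
# config path shown is accelerate's usual default cache location, included only
# as an illustration):
#   accelerate env
#   python -m accelerate.commands.env --config_file ~/.cache/huggingface/accelerate/default_config.yaml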
37
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
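# Direct usage sketch outside pytest: `get_imports` lists only the unguarded
# top-level imports, which is why every case above parses to ["os"].
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_path = os.path.join(tmp_dir, "demo.py")
        with open(demo_path, "w") as f:
            f.write(TOP_LEVEL_TRY_IMPORT)
        print(get_imports(demo_path))  # ['os'] -- `bar` is inside try/except and skipped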
14
0
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
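# Quick sketch exercising the string metrics above; the expected values follow
# directly from the definitions (punctuation and articles stripped, whitespace
# normalized).
if __name__ == "__main__":
    print(normalize_answer("The  Cat!"))         # "cat"
    print(exact_match_score("the cat", "Cat"))   # True
    print(f1_score("the cat sat", "a cat sat"))  # 1.0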
38
def nor_gate(input_1: int, input_2: int) -> int:
    """
    A NOR gate returns 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
14
0
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
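# Usage sketch: loading both sub-tokenizers from a real RAG checkpoint on the
# Hub (requires network access); the question-encoder tokenizer is active by
# default, so plain __call__ encodes queries.
if __name__ == "__main__":
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
    print(batch["input_ids"].shape)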
39
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
14
0
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def lowercase ( A_ )-> Optional[int]: '''simple docstring''' if not is_accelerate_available(): return method a : Any = version.parse(accelerate.__version__ ).base_version if version.parse(A_ ) < version.parse("0.17.0" ): return method def wrapper(self , *A_ , **A_ ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *A_ , **A_ ) return wrapper
40
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.assertIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) ,
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
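# --- Editor's sketch: a minimal standalone version of the inference flow the
# slow tests above exercise. The checkpoint and processor names are taken from
# those tests; everything else (the function name, the final argmax) is
# illustrative. Requires TensorFlow, Pillow, network access, and model weights.
import numpy as np
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

def classify_image(image_path):
    checkpoint = "snap-research/efficientformer-l1-300"
    model = TFEfficientFormerForImageClassification.from_pretrained(checkpoint)
    processor = EfficientFormerImageProcessor.from_pretrained(checkpoint)
    inputs = processor(images=Image.open(image_path), return_tensors="tf")
    outputs = model(**inputs, training=False)
    assert outputs.logits.shape == (1, 1_000)  # ImageNet-1k classification head
    return int(np.argmax(outputs.logits[0]))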
14
0
'''simple docstring''' # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): a = StableDiffusionControlNetImgaImgPipeline a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) a = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase_ ( self: Any ): torch.manual_seed(0 ) lowerCamelCase__ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) lowerCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowerCamelCase__ : str = CLIPTextModel(UpperCamelCase__ ) lowerCamelCase__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__ : Dict = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCamelCase_ ( self: str , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple=0 ): if str(UpperCamelCase__ ).startswith("""mps""" ): lowerCamelCase__ : Any = torch.manual_seed(UpperCamelCase__ ) else: lowerCamelCase__ : int = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) 
lowerCamelCase__ : Tuple = 2 lowerCamelCase__ : Optional[Any] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ) lowerCamelCase__ : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : List[str] = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) lowerCamelCase__ : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowerCamelCase_ ( self: Union[str, Any] ): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCamelCase_ ( self: Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def lowerCamelCase_ ( self: Any ): self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class _lowercase ( _lowercase , _lowercase , unittest.TestCase ): a = StableDiffusionControlNetImgaImgPipeline a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowerCamelCase_ ( self: str ): torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(UpperCamelCase__: Tuple ): if isinstance(UpperCamelCase__ , torch.nn.Conv2d ): torch.nn.init.normal_(UpperCamelCase__.weight ) UpperCamelCase__.bias.data.fill_(1.0 ) lowerCamelCase__ : Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(init_weights ) torch.manual_seed(0 ) lowerCamelCase__ : Tuple = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(init_weights ) torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 ,
num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowerCamelCase__ : Optional[int] = CLIPTextModel(UpperCamelCase__ ) lowerCamelCase__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase__ : Any = MultiControlNetModel([controlneta, controlneta] ) lowerCamelCase__ : List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any]=0 ): if str(UpperCamelCase__ ).startswith("""mps""" ): lowerCamelCase__ : int = torch.manual_seed(UpperCamelCase__ ) else: lowerCamelCase__ : Optional[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCamelCase__ : Any = 2 lowerCamelCase__ : List[str] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ), ] lowerCamelCase__ : Tuple = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Any = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) ) lowerCamelCase__ : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : List[str] = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) lowerCamelCase__ : List[Any] = 10.0 lowerCamelCase__ : Any = 4 lowerCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ ) lowerCamelCase__ : List[str] = steps lowerCamelCase__ : Optional[int] = scale output_1 = pipe(**UpperCamelCase__ )[0] lowerCamelCase__ : Any = self.get_dummy_inputs(UpperCamelCase__ ) lowerCamelCase__ : Any = steps lowerCamelCase__ : Union[str, Any] = scale output_2 = pipe(**UpperCamelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCamelCase__ : str = self.get_dummy_inputs(UpperCamelCase__ ) lowerCamelCase__ : Tuple = steps lowerCamelCase__ : int = scale output_3 = pipe(**UpperCamelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = steps lowerCamelCase__ : Optional[Any] = scale output_4 = pipe(**UpperCamelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3 assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3 assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3 def lowerCamelCase_ ( self: str ): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != """cuda""" or not
is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCamelCase_ ( self: int ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def lowerCamelCase_ ( self: int ): self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Optional[Any] = self.get_dummy_components() lowerCamelCase__ : str = self.pipeline_class(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase__ ) except NotImplementedError: pass @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def lowerCamelCase_ ( self: List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[int] = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCamelCase__ : Tuple = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCamelCase__ , controlnet=UpperCamelCase__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase__ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCamelCase__ : List[Any] = """evil space-punk bird""" lowerCamelCase__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCamelCase__ : Union[str, Any] = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCamelCase__ : Optional[int] = pipe( UpperCamelCase__ , UpperCamelCase__ , control_image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCamelCase__ : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) lowerCamelCase__ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9e-2
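# --- Editor's usage sketch of the img2img ControlNet flow the slow test above
# verifies. Note the surrounding dump spells class names with a letter
# substitution ("ImgaImg" for "Img2Img"); this sketch assumes the real
# diffusers names. Requires a CUDA-capable machine, network access, and the
# full Stable Diffusion 1.5 weights.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))
control = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))

result = pipe(
    "evil space-punk bird",
    image=image,
    control_image=control,
    num_inference_steps=50,
    strength=0.6,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]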
41
from __future__ import annotations def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]: """simple docstring""" A__ = list(range(len(lowercase_ ) ) ) A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda i : ratio[i] , reverse=True ) A__ = 0 A__ = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: A__ = 1 max_value += value[i] capacity -= weight[i] else: A__ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
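# --- Editor's sketch: the same greedy routine with readable names, since the
# dump above collapses every assignment target to `A__` (the sort key is also
# fixed here to actually use its argument). The 240.0 test case is the
# textbook fractional-knapsack example, not from the original file.
def fractional_knapsack(value, weight, capacity):
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value = 0.0
    fractions = [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

best, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert abs(best - 240.0) < 1e-9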
14
0
'''simple docstring''' def SCREAMING_SNAKE_CASE__ ( __A ) -> str: _snake_case = 1 _snake_case = 2 while i * i <= n: _snake_case = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: _snake_case = 1 _snake_case = 1 while True: i += 1 t_num += i if count_divisors(__A ) > 500: break return t_num if __name__ == "__main__": print(solution())
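# --- Editor's sketch of the divisor-counting idea above: the number of
# divisors of n = p1^a1 * ... * pk^ak is (a1+1)*...*(ak+1), so we factor each
# triangular number and stop at the first whose divisor count exceeds the
# target. The small target of 5 is illustrative so the demo runs instantly;
# the original solution uses 500.
def count_divisors(n):
    divisors, p = 1, 2
    while p * p <= n:
        exponent = 0
        while n % p == 0:
            n //= p
            exponent += 1
        divisors *= exponent + 1
        p += 1
    if n > 1:
        divisors *= 2  # one remaining prime factor with exponent 1
    return divisors

def first_triangular_with_divisors_over(target):
    i, triangular = 1, 1
    while count_divisors(triangular) <= target:
        i += 1
        triangular += i
    return triangular

assert first_triangular_with_divisors_over(5) == 28  # divisors: 1, 2, 4, 7, 14, 28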
42
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
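# --- Editor's sketch: the `normalize_text` step above, in isolation. The
# character class mirrors (a subset of) the regex in the script; the sample
# sentence is made up. A typical invocation of the script itself would look
# roughly like this (checkpoint and dataset names hypothetical):
#   python eval.py --model_id <checkpoint> --dataset <dataset> --config en --split test --log_outputs
import re

CHARS_TO_IGNORE = r'[,?.!\-;:"“%‘”’…–]'

def normalize(text):
    text = re.sub(CHARS_TO_IGNORE, "", text.lower())
    return " ".join(text.split())  # collapse runs of whitespace

assert normalize('Hello, WORLD!  "Testing"…') == "hello world testing"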
14
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : Union[str, Any] = """wav2vec2""" def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int: super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase) __UpperCamelCase :Any = hidden_size __UpperCamelCase :int = feat_extract_norm __UpperCamelCase :Tuple = feat_extract_activation __UpperCamelCase :Union[str, Any] = list(__lowercase) __UpperCamelCase :List[Any] = list(__lowercase) __UpperCamelCase :int = list(__lowercase) __UpperCamelCase :List[Any] = conv_bias __UpperCamelCase :Optional[int] = num_conv_pos_embeddings __UpperCamelCase :Dict = num_conv_pos_embedding_groups __UpperCamelCase :Any = len(self.conv_dim) __UpperCamelCase :List[str] = num_hidden_layers __UpperCamelCase :int = intermediate_size __UpperCamelCase :str = hidden_act __UpperCamelCase :Any = num_attention_heads __UpperCamelCase :int = hidden_dropout __UpperCamelCase :Tuple = attention_dropout __UpperCamelCase :List[str] = activation_dropout __UpperCamelCase :Optional[Any] = feat_proj_dropout __UpperCamelCase :Any = final_dropout __UpperCamelCase :Any = layerdrop __UpperCamelCase :str = layer_norm_eps __UpperCamelCase :Optional[Any] = initializer_range __UpperCamelCase :List[str] = vocab_size __UpperCamelCase :str = do_stable_layer_norm __UpperCamelCase :Union[str, Any] = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCamelCase :List[Any] = apply_spec_augment __UpperCamelCase :Tuple = mask_time_prob __UpperCamelCase :int = mask_time_length __UpperCamelCase :Dict = mask_time_min_masks __UpperCamelCase :str = mask_feature_prob __UpperCamelCase :List[str] = mask_feature_length __UpperCamelCase :Union[str, Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __UpperCamelCase :Optional[Any] = num_codevectors_per_group __UpperCamelCase :List[Any] = num_codevector_groups __UpperCamelCase :Tuple = contrastive_logits_temperature __UpperCamelCase :Optional[int] = feat_quantizer_dropout __UpperCamelCase :Optional[int] = num_negatives __UpperCamelCase :List[Any] = codevector_dim __UpperCamelCase :str = proj_codevector_dim __UpperCamelCase :List[str] = diversity_loss_weight # ctc loss __UpperCamelCase :Tuple = ctc_loss_reduction __UpperCamelCase :Tuple = ctc_zero_infinity # adapter __UpperCamelCase :List[str] = add_adapter __UpperCamelCase :Tuple = adapter_kernel_size __UpperCamelCase :str = adapter_stride __UpperCamelCase :Tuple = num_adapter_layers __UpperCamelCase :Tuple = output_hidden_size or hidden_size __UpperCamelCase :Optional[Any] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. __UpperCamelCase :Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __UpperCamelCase :Optional[int] = list(__lowercase) __UpperCamelCase :List[Any] = list(__lowercase) __UpperCamelCase :List[Any] = list(__lowercase) __UpperCamelCase :str = xvector_output_dim @property def UpperCamelCase__ ( self) -> List[str]: return functools.reduce(operator.mul , self.conv_stride , 1)
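# --- Editor's sketch of what the feature-extractor geometry above implies:
# with the default kernels (10, 3, 3, 3, 3, 2, 2) and strides
# (5, 2, 2, 2, 2, 2, 2), raw 16 kHz audio is downsampled by
# prod(strides) = 320, i.e. one frame per 20 ms; the product-of-strides
# property defined above returns exactly that factor. The per-layer length
# rule below is the standard no-padding 1-D convolution formula.
import functools
import operator

KERNELS = (10, 3, 3, 3, 3, 2, 2)
STRIDES = (5, 2, 2, 2, 2, 2, 2)

def feat_extract_output_length(input_length):
    for kernel, stride in zip(KERNELS, STRIDES):
        input_length = (input_length - kernel) // stride + 1
    return input_length

assert functools.reduce(operator.mul, STRIDES, 1) == 320
assert feat_extract_output_length(16_000) == 49  # ~49 frames for 1 s of 16 kHz audio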
43
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
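# --- Editor's sketch of the lazy-import pattern the module above relies on:
# heavy submodules are imported only when one of their attributes is first
# accessed. This is a generic PEP 562 module-level __getattr__, not
# transformers' actual _LazyModule (which adds caching, TYPE_CHECKING support,
# and nicer errors on top of the same idea); `json` stands in for a real
# submodule so the sketch runs on its own.
import importlib

_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _IMPORT_STRUCTURE.items() for attr in attrs}

def __getattr__(name):  # invoked only for names not already defined in the module
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")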
14
0
"""simple docstring""" _a : dict[str, float] = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.6_02_17_66_34e-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.35_58_18, } def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : float ) -> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: _lowerCAmelCase : Union[str, Any] = ( f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" f"Valid values are: {', '.join(_lowerCamelCase )}" ) raise ValueError(_lowerCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
44
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
"""simple docstring""" import string def lowercase ( lowerCAmelCase__ : str ) -> str: __a = '''''' for i in sequence: __a = ord(lowerCAmelCase__ ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def lowercase ( lowerCAmelCase__ : str ) -> str: __a = string.ascii_letters __a = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence ) def lowercase ( ) -> None: from timeit import timeit print('''Running performance benchmarks...''' ) __a = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(f'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowerCAmelCase__ )} seconds''' ) print(f'''> atbash(): {timeit('atbash(printable)' , setup=lowerCAmelCase__ )} seconds''' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(F'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
45
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
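# --- Editor's cross-check for the DP above: Catalan numbers have the closed
# form C(n) = binom(2n, n) / (n + 1), so the recurrence and the formula must
# agree. `math.comb` needs Python 3.8+.
from math import comb

def catalan_closed_form(n):
    return comb(2 * n, n) // (n + 1)  # division is always exact

def catalan_dp(upper):
    dp = [0] * (upper + 1)
    dp[0] = 1
    for i in range(1, upper + 1):
        dp[i] = sum(dp[j] * dp[i - j - 1] for j in range(i))
    return dp

assert catalan_dp(10) == [catalan_closed_form(n) for n in range(11)]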
14
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] _SCREAMING_SNAKE_CASE = RobertaTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> Optional[int]: super().__init__( lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , ) lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if 
pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space: lowerCAmelCase = getattr(lowercase , pre_tok_state.pop("""type""" ) ) lowerCAmelCase = add_prefix_space lowerCAmelCase = pre_tok_class(**lowercase ) lowerCAmelCase = add_prefix_space lowerCAmelCase = """post_processor""" lowerCAmelCase = getattr(self.backend_tokenizer , lowercase , lowercase ) if tokenizer_component_instance: lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase = tuple(state["""sep"""] ) if "cls" in state: lowerCAmelCase = tuple(state["""cls"""] ) lowerCAmelCase = False if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space: lowerCAmelCase = add_prefix_space lowerCAmelCase = True if state.get("""trim_offsets""" , lowercase ) != trim_offsets: lowerCAmelCase = trim_offsets lowerCAmelCase = True if changes_to_apply: lowerCAmelCase = getattr(lowercase , state.pop("""type""" ) ) lowerCAmelCase = component_class(**lowercase ) setattr(self.backend_tokenizer , lowercase , lowercase ) @property def _snake_case ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _snake_case ( self , lowercase ) -> Union[str, Any]: lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value lowerCAmelCase = value def _snake_case ( self , *lowercase , **lowercase ) -> BatchEncoding: lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase , **lowercase ) def _snake_case ( self , *lowercase , **lowercase ) -> BatchEncoding: lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowercase , **lowercase ) def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: lowerCAmelCase = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase ) def _snake_case ( self , lowercase , lowercase=None ) -> List[Any]: lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _snake_case ( self , lowercase , lowercase = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
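# --- Editor's sketch: the observable effect of build_inputs_with_special_tokens
# above, checked through the public tokenizer API. Requires network access to
# fetch the "roberta-base" vocabulary; the sentences are arbitrary.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
single = tok("Hello world")["input_ids"]
pair = tok("Hello", "world")["input_ids"]
assert single[0] == tok.bos_token_id and single[-1] == tok.eos_token_id  # <s> ... </s>
assert pair.count(tok.eos_token_id) == 3  # <s> A </s></s> B </s>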
46
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
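# --- Editor's toy illustration of the masking mechanics shared by the
# `magnitude`/`topK` branches above: keep the k highest-magnitude entries and
# zero the rest. In the real script the topK scores are learned `mask_scores`,
# not the weights themselves; here a random matrix and a 30% density are
# stand-ins.
import torch

def topk_mask(scores, density):
    k = max(1, int(density * scores.numel()))
    # threshold = k-th largest |score| == (numel - k + 1)-th smallest
    threshold = scores.abs().flatten().kthvalue(scores.numel() - k + 1).values
    return (scores.abs() >= threshold).to(scores.dtype)

weights = torch.randn(64, 64)
mask = topk_mask(weights, density=0.3)
pruned = weights * mask
print(f"density after pruning: {mask.mean().item():.2%}")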
14
0
'''simple docstring''' from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class A__ ( A__ ): def A ( self : Optional[Any] ) -> str: '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def A ( self : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE ={'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} return Dataset.from_dict(_a ) def A ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self._create_example_records() _SCREAMING_SNAKE_CASE =Dataset.from_list(_a ) self.assertListEqual(dset.column_names , ['col_1', 'col_2'] ) for i, r in enumerate(_a ): self.assertDictEqual(_a , example_records[i] ) def A ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self._create_example_records() _SCREAMING_SNAKE_CASE =Dataset.from_list(_a ) _SCREAMING_SNAKE_CASE =Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def A ( self : Any ) -> List[Any]: # checks what happens with missing columns '''simple docstring''' _SCREAMING_SNAKE_CASE =[{'col_1': 1}, {'col_2': 'x'}] _SCREAMING_SNAKE_CASE =Dataset.from_list(_a ) self.assertDictEqual(dset[0] , {'col_1': 1} ) self.assertDictEqual(dset[1] , {'col_1': None} ) # NB: first record is used for columns def A ( self : str ) -> int: # checks if the type can be inferred from the second record '''simple docstring''' _SCREAMING_SNAKE_CASE =[{'col_1': []}, {'col_1': [1, 2]}] _SCREAMING_SNAKE_CASE =Dataset.from_list(_a ) self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) ) def A ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =Dataset.from_list([] ) self.assertEqual(len(_a ) , 0 ) self.assertListEqual(dset.column_names , [] )
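# --- Editor's mini-demo of the behavior the tests above pin down: the first
# record passed to Dataset.from_list fixes the column set, and keys missing
# from later records come back as None. Requires the `datasets` package.
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
assert dset.column_names == ["col_1"]
assert dset[1] == {"col_1": None}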
47
_lowerCamelCase : Optional[int] = 65521 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = 1 A__ = 0 for plain_chr in plain_text: A__ = (a + ord(lowercase_ )) % MOD_ADLER A__ = (b + a) % MOD_ADLER return (b << 16) | a
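# --- Editor's sanity check for the Adler-32 routine above: the stdlib's
# zlib.adler32 implements the same checksum, so the two must agree on any
# ASCII input (the pure-Python version takes a str and hashes its code points).
import zlib

def adler32(plain_text):
    mod_adler = 65521
    a, b = 1, 0
    for ch in plain_text:
        a = (a + ord(ch)) % mod_adler
        b = (b + a) % mod_adler
    return (b << 16) | a

for sample in ("Wikipedia", "hello world", ""):
    assert adler32(sample) == zlib.adler32(sample.encode("ascii"))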
14
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : Any = { 'vocab_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model', }, 'tokenizer_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE__ : Tuple = { 'google/fnet-base': 512, 'google/fnet-large': 512, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = '▁' class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : int = VOCAB_FILES_NAMES lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[int] = ["""input_ids""", """token_type_ids"""] lowerCamelCase_ : List[str] = FNetTokenizer def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , **UpperCamelCase__ , ) -> str: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
lowerCamelCase : Any = ( AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ , normalized=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token ) super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : Union[str, Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : Optional[Any] = vocab_file lowerCamelCase : List[Any] = False if not self.vocab_file else True def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : int = [self.sep_token_id] lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : List[str] = [self.sep_token_id] lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : int = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
48
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    # Fast (Rust-backed) DPR context-encoder tokenizer; identical to BertTokenizerFast
    # apart from the pretrained file maps below.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    # Fast DPR question-encoder tokenizer; same deal as the context encoder above.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several
            passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
            `None`, this will use the predefined model maximum length if a maximum length is required by one of the
            truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
            truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # No passages given: behave like a regular tokenizer call.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        # Rank passages by relevance, then extract the best spans from each.
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # Score every candidate (start, end) pair, keep the top non-overlapping ones.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
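# --- Usage sketch (not part of the original module) --------------------------
# End-to-end use of the reader tokenizer, mirroring the documented workflow for
# the `facebook/dpr-reader-single-nq-base` checkpoint (downloads weights).
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions="What is love?",
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by Haddaway"],
    padding=True,
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)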
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
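# --- Usage sketch (not part of the original test file) ------------------------
# The same run can be launched outside the test harness on CPU; the paths are
# placeholders and the dummy train/val/test files must exist beforehand:
#
#   python finetune_rag.py \
#       --data_dir data --output_dir output \
#       --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence \
#       --do_train --do_predict --n_val -1 \
#       --gpus=0 --distributed_backend=ddp_cpu --num_processes=2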
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported here to avoid a circular import with the auto configs.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
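# --- Usage sketch (not part of the original module) --------------------------
# Building a composite config from two standalone configs; the classmethod
# flips the decoder flags before assembling the pair.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention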
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    # Update the version in one file, using the regex registered for `pattern`.
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    # Update the version everywhere it is pinned; patch releases skip the examples.
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    # Replace links to the main docs by links to the stable docs in the README model list.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    # Read the current version out of the package __init__.
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
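# --- Worked example (not part of the original script) -------------------------
# What a single REPLACE_PATTERNS entry does, applied to an in-memory string:
import re

re_pattern, replace = (
    re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE),
    '__version__ = "VERSION"\n',
)
code = '__version__ = "4.26.0.dev0"\n'
print(re_pattern.sub(replace.replace("VERSION", "4.26.0"), code), end="")
# -> __version__ = "4.26.0"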
def longest_distance(graph):
    # Longest path (counted in vertices) in a DAG, via Kahn's topological sort.
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            # Relax the distance along edge (vertex, x).
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
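# On the sample graph above one longest chain is 0 -> 2 -> 5 -> 6 -> 7
# (5 vertices), so the call prints 5. A returning variant (a sketch, not part
# of the original file) makes that checkable:
def longest_distance_value(graph):
    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    queue = [i for i in range(len(graph)) if indegree[i] == 0]
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            long_dist[x] = max(long_dist[x], long_dist[vertex] + 1)
            if indegree[x] == 0:
                queue.append(x)
    return max(long_dist)


assert longest_distance_value({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5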
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
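# Why this works (a short derivation, not in the original file): write the
# three terms of the decreasing arithmetic progression as x = y + d, y, z = y - d.
# Then
#     x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = 4*y*d - y**2 = y * (4*d - y)
# so n = y * m with m = 4*d - y. The loop iterates y (`first_term`) over the
# divisors of n, sets m = n / y, and checks that y + m = 4*d is divisible by 4;
# z > 0 forces y > d and n > 0 forces y < 4*d, which are exactly the two guards.
y, d = 10, 7
x, z = y + d, y - d
assert x * x - y * y - z * z == y * (4 * d - y)  # 180 == 180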
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
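# --- Usage sketch (not part of the original module) ---------------------------
# The builder above is normally reached through `load_dataset`; the file names
# here are placeholders.
from datasets import load_dataset

# JSON Lines (one object per line), read in `chunksize`-sized batches:
ds = load_dataset("json", data_files="data.jsonl", split="train")

# A single JSON document whose records live under one key, selected via `field`:
ds = load_dataset("json", data_files="data.json", field="data", split="train")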
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
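# --- Usage sketch (not part of the original test file) ------------------------
# The reader under test backs `load_dataset("text", ...)`; the path is a placeholder.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "train.txt"}, split="train")
print(ds.column_names)  # ['text'] -- the single column these tests assert on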
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
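# --- Usage sketch (not part of the original test file) ------------------------
# `make_student` lives in the seq2seq-distillation example scripts, not the
# transformers package. The three-element return below follows the unpacking
# used in the tests and is an assumption about the helper's full return value.
import tempfile

from make_student import create_student_by_copying_alternating_layers

# e/d are the numbers of encoder/decoder layers the student keeps.
student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1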
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
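# --- Usage sketch (not part of the original module) ---------------------------
# Combining the two sub-configs, mirroring `from_text_vision_configs` above:
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim, config.vision_config.image_size)  # 768 224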
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after resizing with a
        # shortest-edge / longest-edge size dict, preserving aspect ratio.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
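# --- Usage sketch (not part of the original test file) ------------------------
# The resize rule the tests pin down: the shortest edge goes to 18 while the
# aspect ratio is preserved, so a 30x40 image becomes 18x24.
import numpy as np
from PIL import Image
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor(size={"shortest_edge": 18, "longest_edge": 1333})
image = Image.fromarray(np.random.randint(0, 256, (40, 30, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 24, 18])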
"""simple docstring""" from PIL import Image def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = image.load() for i in range(lowerCAmelCase_ ): for j in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = pixels[j, i] mean += pixel mean //= width * height for j in range(lowerCAmelCase_ ): for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": a__ : List[str] = mean_threshold(Image.open('''path_to_image''').convert('''L''')) image.save('''output_image_path''')
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # Vectorized distance via NumPy.
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # Pure-Python equivalent, summing squared componentwise differences.
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
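# --- Quick check (not part of the original module) -----------------------------
# The classic 3-4-5 right triangle exercises both implementations.
assert euclidean_distance([0, 0], [3, 4]) == 5.0
assert abs(euclidean_distance_no_np([0, 0], [3, 4]) - 5.0) < 1e-12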
"""Tests for the zero-shot object detection pipeline."""

import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so the module still imports without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
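# Usage sketch (added, not part of the test module): how the pipeline exercised
# above is typically called. It downloads a checkpoint, so the calls are left
# commented out; the image URL mirrors the slow tests.
#
# from transformers import pipeline
#
# detector = pipeline("zero-shot-object-detection")
# results = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
#     threshold=0.2,
# )
# for result in results:
#     print(result["label"], result["score"], result["box"])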
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily report the mel-bin count as the feature size so the
                # feature extractor pads spectrogram targets with the right width.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
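# Usage sketch (added): how this processor is typically driven for TTS data
# preparation. The checkpoint id is an assumption for illustration, and loading
# it needs a network connection, so the calls are left commented out.
#
# from transformers import SpeechT5Processor
#
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # assumed checkpoint
# batch = processor(text="Hello world", return_tensors="pt")
# print(batch["input_ids"].shape)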
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Calculate the fixed monthly payment (EMI) needed to repay a loan, given the
    principal borrowed, the yearly interest rate, and the repayment period in years.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
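# Worked example (added): a 25,000 loan at 12% per annum over 2 years gives a
# monthly rate of 0.01 and 24 payments, so the EMI is
# 25000 * 0.01 * 1.01**24 / (1.01**24 - 1) ≈ 1176.84.
if __name__ == "__main__":
    emi = equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=2)
    print(f"Monthly installment: {emi:.2f}")  # ~1176.84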
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
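# Offline-safe usage sketch (added; assumes an installed `transformers`, since
# the relative imports above only resolve inside the library itself):
#
# from transformers import GitConfig
#
# config = GitConfig(num_hidden_layers=4)   # example override
# print(config.model_type)                  # "git"
# print(config.vision_config.hidden_size)   # 768 by default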
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Any =(DEISMultistepScheduler,) __UpperCAmelCase : Optional[int] =(("""num_inference_steps""", 2_5),) def snake_case ( self , **__a ): __lowerCAmelCase = { "num_train_timesteps": 10_00, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, } config.update(**__a ) return config def snake_case ( self , __a=0 , **__a ): __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config(**__a ) __lowerCAmelCase = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __lowerCAmelCase = scheduler_class.from_pretrained(__a ) new_scheduler.set_timesteps(__a ) # copy over dummy past residuals __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase = sample, sample for t in range(__a , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def snake_case ( self ): pass def snake_case ( self , __a=0 , **__a ): __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __lowerCAmelCase = scheduler_class.from_pretrained(__a ) # copy over dummy past residuals new_scheduler.set_timesteps(__a ) # copy over dummy past residual (must be after setting timesteps) __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def snake_case ( self , __a=None , **__a ): if scheduler is None: __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**__a ) __lowerCAmelCase = scheduler_class(**__a ) __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(**__a ) __lowerCAmelCase = scheduler_class(**__a ) __lowerCAmelCase = 10 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter 
scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(__a , __a ) __lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample return sample def snake_case ( self ): __lowerCAmelCase = dict(self.forward_default_kwargs ) __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a ) for scheduler_class in self.scheduler_classes: __lowerCAmelCase = self.get_scheduler_config() __lowerCAmelCase = scheduler_class(**__a ) __lowerCAmelCase = self.dummy_sample __lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__a , "set_timesteps" ): scheduler.set_timesteps(__a ) elif num_inference_steps is not None and not hasattr(__a , "set_timesteps" ): __lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] __lowerCAmelCase = scheduler.timesteps[5] __lowerCAmelCase = scheduler.timesteps[6] __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults __lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase = self.full_loop(scheduler=__a ) __lowerCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 __lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase = self.full_loop(scheduler=__a ) __lowerCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def snake_case ( self ): for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=__a ) def snake_case ( self ): self.check_over_configs(thresholding=__a ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type="deis" , solver_order=__a , solver_type=__a , ) def snake_case ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def snake_case ( self ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) __lowerCAmelCase = self.full_loop( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) assert not torch.isnan(__a ).any(), "Samples have nan numbers" def snake_case ( self ): self.check_over_configs(lower_order_final=__a ) self.check_over_configs(lower_order_final=__a ) def snake_case ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=__a , time_step=0 ) def snake_case ( self ): __lowerCAmelCase = self.full_loop() __lowerCAmelCase = torch.mean(torch.abs(__a ) ) assert 
abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def snake_case ( self ): __lowerCAmelCase = self.full_loop(prediction_type="v_prediction" ) __lowerCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def snake_case ( self ): __lowerCAmelCase = self.scheduler_classes[0] __lowerCAmelCase = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 ) __lowerCAmelCase = scheduler_class(**__a ) __lowerCAmelCase = 10 __lowerCAmelCase = self.dummy_model() __lowerCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase = model(__a , __a ) __lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample assert sample.dtype == torch.floataa
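# Offline-safe usage sketch (added; the test module above only runs inside the
# diffusers test suite because of its relative import). The config values
# mirror get_scheduler_config(); no model weights are needed:
#
# from diffusers import DEISMultistepScheduler
#
# scheduler = DEISMultistepScheduler(
#     num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", solver_order=2
# )
# scheduler.set_timesteps(10)
# print(scheduler.timesteps)  # 10 timesteps, descending from ~999 toward 0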
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count string for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
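# Offline-safe usage sketch (added; assumes an installed `transformers`, since
# the relative imports above only resolve inside the library):
#
# from transformers import DetrConfig
#
# config = DetrConfig(num_queries=50)  # example override
# print(config.hidden_size)            # 256  (aliased to d_model via attribute_map)
# print(config.num_attention_heads)    # 8    (aliased to encoder_attention_heads)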
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
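# Added illustration: the update applied in `convert` is the standard LoRA
# merge  W <- W0 + alpha * (up @ down).  A self-contained sketch on dummy
# tensors (all shapes are assumptions chosen for demonstration):
#
# import torch
#
# out_dim, in_dim, rank, alpha = 8, 8, 2, 0.75
# w0 = torch.zeros(out_dim, in_dim)
# up = torch.randn(out_dim, rank)      # corresponds to a "lora_up" weight
# down = torch.randn(rank, in_dim)     # corresponds to a "lora_down" weight
# merged = w0 + alpha * torch.mm(up, down)
# print(merged.shape)                  # torch.Size([8, 8])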
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
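# Added for comparison: a recursive sketch of the same traversal. It explores
# the same vertex set as the iterative version above; `explored` defaults to a
# fresh set on the first call.
def depth_first_search_recursive(graph: dict, v: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored


if __name__ == "__main__":
    print(depth_first_search_recursive(G, "A"))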
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
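# Sketch (added) of the behaviour the test above pins down: imports guarded by
# try/except are excluded from the result. The file path is a hypothetical
# placeholder; `get_imports` needs a real file on disk.
#
# from transformers.dynamic_module_utils import get_imports
#
# with open("/tmp/mod.py", "w") as f:  # hypothetical path
#     f.write("import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n")
# print(get_imports("/tmp/mod.py"))  # ['os'] -- the guarded 'bar' is ignored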
"""simple docstring""" def _snake_case ( _snake_case : list ): if len(_snake_case ) <= 1: return lst lowerCAmelCase : Any = 1 while i < len(_snake_case ): if lst[i - 1] <= lst[i]: i += 1 else: lowerCAmelCase, lowerCAmelCase : Tuple = lst[i], lst[i - 1] i -= 1 if i == 0: lowerCAmelCase : List[str] = 1 return lst if __name__ == "__main__": snake_case__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip() snake_case__ : Any = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0, matching a NOR gate."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
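# Added illustration: NOR is functionally complete, so NOT, OR and AND can all
# be built from `nor_gate` alone. A minimal sketch:
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


if __name__ == "__main__":
    assert not_gate(0) == 1 and or_gate(0, 1) == 1 and and_gate(1, 1) == 1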
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml _a = NewType('DataClass', Any) _a = NewType('DataClassType', Any) def __a ( __lowerCamelCase ): if isinstance(__lowerCamelCase, __lowerCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = {str(__lowerCamelCase ): choice for choice in choices} return lambda __lowerCamelCase : str_to_choice.get(__lowerCamelCase, __lowerCamelCase ) def __a ( *, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = dataclasses.MISSING, __lowerCamelCase = dataclasses.MISSING, __lowerCamelCase = None, **__lowerCamelCase, ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls UpperCAmelCase_ : Optional[Any] = {} if aliases is not None: UpperCAmelCase_ : List[str] = aliases if help is not None: UpperCAmelCase_ : List[Any] = help return dataclasses.field(metadata=__lowerCamelCase, default=__lowerCamelCase, default_factory=__lowerCamelCase, **__lowerCamelCase ) class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Iterable[DataClassType] def __init__( self , lowercase_ , **lowercase_ ): """simple docstring""" # To make the default appear when using --help if "formatter_class" not in kwargs: UpperCAmelCase_ : int = ArgumentDefaultsHelpFormatter super().__init__(**lowercase_ ) if dataclasses.is_dataclass(lowercase_ ): UpperCAmelCase_ : Tuple = [dataclass_types] UpperCAmelCase_ : Optional[Any] = list(lowercase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowercase_ ) @staticmethod def UpperCamelCase__ ( lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = F"""--{field.name}""" UpperCAmelCase_ : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , lowercase_ ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop("aliases" , [] ) if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : Any = [aliases] UpperCAmelCase_ : Optional[Any] = getattr(field.type , "__origin__" , field.type ) if origin_type is Union or (hasattr(lowercase_ , "UnionType" ) and isinstance(lowercase_ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowercase_ ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
F""" Problem encountered in field '{field.name}'.""" ) if type(lowercase_ ) not in field.type.__args__: # filter `str` in Union UpperCAmelCase_ : str = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] UpperCAmelCase_ : Dict = getattr(field.type , "__origin__" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) UpperCAmelCase_ : str = ( field.type.__args__[0] if isinstance(lowercase_ , field.type.__args__[1] ) else field.type.__args__[1] ) UpperCAmelCase_ : List[str] = getattr(field.type , "__origin__" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) UpperCAmelCase_ : List[Any] = {} if origin_type is Literal or (isinstance(field.type , lowercase_ ) and issubclass(field.type , lowercase_ )): if origin_type is Literal: UpperCAmelCase_ : List[Any] = field.type.__args__ else: UpperCAmelCase_ : Union[str, Any] = [x.value for x in field.type] UpperCAmelCase_ : Optional[Any] = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: UpperCAmelCase_ : str = field.default else: UpperCAmelCase_ : str = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument UpperCAmelCase_ : int = copy(lowercase_ ) # Hack because type=bool in argparse does not behave as we want. UpperCAmelCase_ : Union[str, Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. UpperCAmelCase_ : Union[str, Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way UpperCAmelCase_ : Optional[int] = default # This tells argparse we accept 0 or 1 value after --field_name UpperCAmelCase_ : int = "?" # This is the value that will get picked if we do --field_name (without value) UpperCAmelCase_ : Tuple = True elif isclass(lowercase_ ) and issubclass(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[str] = field.type.__args__[0] UpperCAmelCase_ : str = "+" if field.default_factory is not dataclasses.MISSING: UpperCAmelCase_ : List[str] = field.default_factory() elif field.default is dataclasses.MISSING: UpperCAmelCase_ : Optional[Any] = True else: UpperCAmelCase_ : Tuple = field.type if field.default is not dataclasses.MISSING: UpperCAmelCase_ : Optional[int] = field.default elif field.default_factory is not dataclasses.MISSING: UpperCAmelCase_ : Any = field.default_factory() else: UpperCAmelCase_ : List[Any] = True parser.add_argument(lowercase_ , *lowercase_ , **lowercase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): UpperCAmelCase_ : Any = False parser.add_argument(F"""--no_{field.name}""" , action="store_false" , dest=field.name , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" if hasattr(lowercase_ , "_argument_group_name" ): UpperCAmelCase_ : int = self.add_argument_group(dtype._argument_group_name ) else: UpperCAmelCase_ : int = self try: UpperCAmelCase_ : Dict[str, type] = get_type_hints(lowercase_ ) except NameError: raise RuntimeError( F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowercase_ ): UpperCAmelCase_ : Dict = ".".join(map(lowercase_ , sys.version_info[:3] ) ) raise RuntimeError( F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(lowercase_ ): if not field.init: continue UpperCAmelCase_ : Optional[Any] = type_hints[field.name] self._parse_dataclass_field(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_=None , lowercase_=None , ): """simple docstring""" if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): UpperCAmelCase_ : int = [] if args_filename: args_files.append(Path(lowercase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values UpperCAmelCase_ : str = ArgumentParser() args_file_parser.add_argument(lowercase_ , type=lowercase_ , action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = args_file_parser.parse_known_args(args=lowercase_ ) UpperCAmelCase_ : List[str] = vars(lowercase_ ).get(args_file_flag.lstrip("-" ) , lowercase_ ) if cmd_args_file_paths: args_files.extend([Path(lowercase_ ) for p in cmd_args_file_paths] ) UpperCAmelCase_ : int = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last UpperCAmelCase_ : Optional[Any] = file_args + args if args is not None else file_args + sys.argv[1:] UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.parse_known_args(args=lowercase_ ) UpperCAmelCase_ : Union[str, Any] = [] for dtype in self.dataclass_types: UpperCAmelCase_ : List[Any] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} UpperCAmelCase_ : List[str] = {k: v for k, v in vars(lowercase_ ).items() if k in keys} for k in keys: delattr(lowercase_ , lowercase_ ) UpperCAmelCase_ : Dict = dtype(**lowercase_ ) outputs.append(lowercase_ ) 
if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(lowercase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = set(args.keys() ) UpperCAmelCase_ : Tuple = [] for dtype in self.dataclass_types: UpperCAmelCase_ : Optional[int] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} UpperCAmelCase_ : Optional[int] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) UpperCAmelCase_ : int = dtype(**lowercase_ ) outputs.append(lowercase_ ) if not allow_extra_keys and unused_keys: raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(lowercase_ )}""" ) return tuple(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ): """simple docstring""" with open(Path(lowercase_ ) , encoding="utf-8" ) as open_json_file: UpperCAmelCase_ : Tuple = json.loads(open_json_file.read() ) UpperCAmelCase_ : Optional[int] = self.parse_dict(lowercase_ , allow_extra_keys=lowercase_ ) return tuple(lowercase_ ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ = False ): """simple docstring""" UpperCAmelCase_ : Tuple = self.parse_dict(yaml.safe_load(Path(lowercase_ ).read_text() ) , allow_extra_keys=lowercase_ ) return tuple(lowercase_ )
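# Usage sketch (added): parsing a minimal dataclass from an explicit argument
# list with the parser defined above. The dataclass and its fields are
# hypothetical examples; everything here is offline-safe and self-contained.
if __name__ == "__main__":

    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = 3e-4
        use_fp16: bool = False

    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--use_fp16"])
    print(example_args.learning_rate, example_args.use_fp16)  # 0.001 True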
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
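# Worked example (added): factorial(10) == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27.
if __name__ == "__main__":
    assert solution(10) == 27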
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
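The integration tests above reference a public checkpoint; here is a minimal inference sketch against it, assuming network access, TensorFlow, and the same `snap-research/efficientformer-l1-300` weights (this sketch is not part of the original test file):

import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000), as asserted in the slow test
print(int(tf.math.argmax(logits, axis=-1)[0]))   # predicted ImageNet class id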
14
0
'''simple docstring'''

import math


def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
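A quick worked check of solution() above: for n = 10 the square of the sum is 55² = 3025 and the sum of squares is 385, so the difference is 2640.

assert solution(10) == 2640  # 3025 - 385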
63
from __future__ import annotations


def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy: visit items in decreasing value/weight ratio
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
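A short example run of fractional_knapsack() above on the classic three-item instance (value/weight ratios are 6, 5, 4, so items 0 and 1 are taken whole plus two thirds of item 2):

max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 20 / 30]  # two thirds of the last item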
14
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: A_ = None A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''', }, } A_ = { '''google/fnet-base''': 5_12, '''google/fnet-large''': 5_12, } A_ = '''▁''' class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "token_type_ids"] lowercase__ = FNetTokenizer def __init__( self: List[str], a_: str=None, a_: Optional[Any]=None, a_: Tuple=False, a_: Any=True, a_: List[str]=True, a_: List[Any]="<unk>", a_: Optional[Any]="[SEP]", a_: Optional[int]="<pad>", a_: Optional[Any]="[CLS]", a_: int="[MASK]", **a_: Optional[Any], ): '''simple docstring''' _snake_case : str = ( AddedToken(a_, lstrip=a_, rstrip=a_, normalized=a_ ) if isinstance(a_, a_ ) else mask_token ) super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, remove_space=a_, keep_accents=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, **a_, ) _snake_case : Union[str, Any] = do_lower_case _snake_case : Dict = remove_space _snake_case : int = keep_accents _snake_case : Dict = vocab_file _snake_case : str = False if not self.vocab_file else True def UpperCamelCase_ ( self: Optional[Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[Any] = [self.sep_token_id] _snake_case : Optional[int] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self: Union[str, Any], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Any = [self.sep_token_id] _snake_case : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: List[str], a_: str, a_: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : List[Any] = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file, a_ ) return (out_vocab_file,)
64
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
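Two behavior checks for the normalize_text helper referenced in the script above (the name follows its call site, since the flattened defs lost their identifiers): punctuation in the ignore set is stripped, text is lowercased, and whitespace runs collapse to single spaces.

assert normalize_text("Hello, World!") == "hello world"
assert normalize_text("one\n\ntwo  three") == "one two three"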
14
0
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable UpperCamelCase__ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['DPTFeatureExtractor'] UpperCamelCase__ = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: snake_case_ :List[str] = get_activation("""swish""" ) self.assertIsInstance(snake_case , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: snake_case_ :Optional[int] = get_activation("""silu""" ) self.assertIsInstance(snake_case , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :Optional[Any] = get_activation("""mish""" ) self.assertIsInstance(snake_case , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_ :List[Any] = get_activation("""gelu""" ) self.assertIsInstance(snake_case , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
66
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
'''simple docstring'''


def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f'{solution() = }')
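A worked check of solution() above: the 10th convergent of e is 1457/536, so solution(10) gives 1 + 4 + 5 + 7 = 17, matching the Project Euler 65 statement.

assert solution(10) == 17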
67
def catalan_numbers(upper_limit: int) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print("""Try another upper limit for the sequence: """, end="")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")

    import doctest

    doctest.testmod()
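A quick example of the function above, covering the first six Catalan numbers:

assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]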
14
0
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = None # Automatically constructed __lowerCamelCase = "dict" __lowerCamelCase = None __lowerCamelCase = field(default='Translation' , init=snake_case , repr=snake_case ) def __call__( self ) -> int: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value("string" ) for k in sorted(self.languages )} @dataclass class a__ : """simple docstring""" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None # Automatically constructed __lowerCamelCase = "dict" __lowerCamelCase = None __lowerCamelCase = field(default='TranslationVariableLanguages' , init=snake_case , repr=snake_case ) def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = sorted(set(self.languages ) ) if self.languages else None A__ = len(self.languages ) if self.languages else None def __call__( self ) -> Any: '''simple docstring''' return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} ) def UpperCamelCase ( self , lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = set(self.languages ) if self.languages and set(lowercase ) - lang_set: raise ValueError( F'Some languages in example ({", ".join(sorted(set(lowercase ) - lang_set ) )}) are not in valid set ({", ".join(lowercase )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. A__ = [] for lang, text in translation_dict.items(): if isinstance(lowercase , lowercase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. A__ , A__ = zip(*sorted(lowercase ) ) return {"language": languages, "translation": translations} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("string" ) ), "translation": Sequence(Value("string" ) ), }
68
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
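A hedged example invocation of the pruning script above, using only the flags its parser defines; the script filename and model path are hypothetical placeholders:

# python bertarize.py \
#     --pruning_method sigmoied_threshold \
#     --threshold 0.1 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model
#
# With --target_model_path omitted, the script writes the binarized weights to a
# sibling `bertarized_fine_pruned_model` folder next to the input model.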
14
0
"""simple docstring""" import sys __UpperCamelCase = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def UpperCAmelCase ( UpperCAmelCase ) -> int: snake_case_ = 1 for digit in s: product *= int(UpperCAmelCase ) return product def UpperCAmelCase ( UpperCAmelCase = N ) -> int: snake_case_ = -sys.maxsize - 1 snake_case_ = n[:13] snake_case_ = 13 while cur_index < len(UpperCAmelCase ) - 13: if int(n[cur_index] ) >= int(substr[0] ): snake_case_ = substr[1:] + n[cur_index] cur_index += 1 else: snake_case_ = max(UpperCAmelCase , str_eval(UpperCAmelCase ) ) snake_case_ = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
69
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """simple docstring"""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
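A known Adler-32 test vector for the function above:

assert adler32("Wikipedia") == 300286872  # 0x11E60398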
14
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class UpperCAmelCase ( snake_case_ ): _lowercase: Optional[torch.FloatTensor] = None _lowercase: torch.FloatTensor = None _lowercase: Optional[Tuple[torch.FloatTensor]] = None _lowercase: Optional[Tuple[torch.FloatTensor]] = None class UpperCAmelCase ( snake_case_ ): def __init__( self : Optional[Any] , __snake_case : List[Any]=1 , __snake_case : Dict=0 , __snake_case : Optional[Any]=2 , __snake_case : Optional[int]=5_12 , __snake_case : int="cls" , __snake_case : Tuple=False , __snake_case : Dict=True , **__snake_case : Union[str, Any] , ) -> Union[str, Any]: super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) _lowerCAmelCase = project_dim _lowerCAmelCase = pooler_fn _lowerCAmelCase = learn_encoder _lowerCAmelCase = use_attention_mask class UpperCAmelCase ( snake_case_ ): _lowercase: List[str] = [r'''pooler''', r'''logit_scale'''] _lowercase: List[str] = [r'''position_ids''', r'''predictions.decoder.bias'''] _lowercase: Optional[Any] = '''roberta''' _lowercase: Optional[Any] = RobertaSeriesConfig def __init__( self : str , __snake_case : Optional[Any] ) -> Optional[Any]: super().__init__(__snake_case ) _lowerCAmelCase = XLMRobertaModel(__snake_case ) _lowerCAmelCase = nn.Linear(config.hidden_size , config.project_dim ) _lowerCAmelCase = getattr(__snake_case , """has_pre_transformation""" , __snake_case ) if self.has_pre_transformation: _lowerCAmelCase = nn.Linear(config.hidden_size , config.project_dim ) _lowerCAmelCase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def lowercase__ ( self : List[str] , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , ) -> int: _lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _lowerCAmelCase = self.base_model( input_ids=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_attentions=__snake_case , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__snake_case , ) if self.has_pre_transformation: _lowerCAmelCase = outputs["""hidden_states"""][-2] _lowerCAmelCase = self.pre_LN(__snake_case ) _lowerCAmelCase = self.transformation_pre(__snake_case ) return TransformationModelOutput( projection_state=__snake_case , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: _lowerCAmelCase = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__snake_case , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
70
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _lowerCamelCase : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _lowerCamelCase : Tuple = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple 
docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRContextEncoderTokenizer class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRQuestionEncoderTokenizer _lowerCamelCase : int = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _lowerCamelCase : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ : '''simple docstring''' def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) elif titles is None or texts is None: A__ = titles if texts is None else texts return super().__call__( UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles] A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts] A__ = len(UpperCAmelCase__) A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages assert len(UpperCAmelCase__) == len( UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""" A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for 
encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__) ] } if return_attention_mask is not False: A__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) A__ = attention_mask return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = reader_input['''input_ids'''] A__ , A__ , A__ = reader_output[:3] A__ = len(UpperCAmelCase__) A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__) A__ = [] for doc_id in sorted_docs: A__ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A__ = sequence_ids.index(self.pad_token_id) else: A__ = len(UpperCAmelCase__) A__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = [] for start_index, start_score in enumerate(UpperCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__) A__ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A__ = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = DPRReaderTokenizer
14
0
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : List[str] =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : List[str] =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : Dict =AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) __UpperCamelCase : List[Any] =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : int =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) __UpperCamelCase : Dict =DDPMScheduler() __UpperCamelCase : int =AudioDiffusionPipeline(vqvae=lowerCamelCase__ , unet=self.dummy_unet , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ ) __UpperCamelCase : str =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(42 ) __UpperCamelCase : List[str] =pipe(generator=lowerCamelCase__ , steps=4 ) __UpperCamelCase : Tuple =output.audios[0] __UpperCamelCase : str =output.images[0] __UpperCamelCase : List[str] =torch.Generator(device=lowerCamelCase__ ).manual_seed(42 ) __UpperCamelCase : Optional[Any] =pipe(generator=lowerCamelCase__ , steps=4 , return_dict=lowerCamelCase__ ) __UpperCamelCase : Dict =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) __UpperCamelCase : Union[str, Any] =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] __UpperCamelCase : Optional[int] =np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] __UpperCamelCase : List[Any] =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase : List[Any] =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) __UpperCamelCase : str =DDIMScheduler() __UpperCamelCase : int =self.dummy_vqvae_and_unet __UpperCamelCase : Any =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ ) __UpperCamelCase : Tuple =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) np.random.seed(0 ) __UpperCamelCase : Optional[int] =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) __UpperCamelCase : Dict =torch.Generator(device=lowerCamelCase__ ).manual_seed(42 ) __UpperCamelCase : Dict =pipe(raw_audio=lowerCamelCase__ , generator=lowerCamelCase__ , start_step=5 , steps=10 ) __UpperCamelCase : Union[str, Any] =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) __UpperCamelCase : List[Any] =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] __UpperCamelCase : Union[str, Any] =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 __UpperCamelCase : Dict =self.dummy_unet_condition __UpperCamelCase : List[str] =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase__ , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ ) __UpperCamelCase : Dict =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) np.random.seed(0 ) __UpperCamelCase : Optional[Any] =torch.rand((1, 1, 10) ) __UpperCamelCase : int =pipe(generator=lowerCamelCase__ , encoding=lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =output.images[0] __UpperCamelCase : Dict =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] __UpperCamelCase : List[str] =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =torch_device __UpperCamelCase : Any =DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) __UpperCamelCase : str =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[str] =torch.Generator(device=lowerCamelCase__ ).manual_seed(42 ) __UpperCamelCase : Any =pipe(generator=lowerCamelCase__ ) __UpperCamelCase : List[str] =output.audios[0] __UpperCamelCase : List[str] =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] __UpperCamelCase : List[str] =np.frombuffer(image.tobytes() , dtype='uint8' )[:10] __UpperCamelCase : Union[str, Any] =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
71
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''encoder-decoder''' UpperCAmelCase__ = True def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" A__ = kwargs.pop('''encoder''') A__ = encoder_config.pop('''model_type''') A__ = kwargs.pop('''decoder''') A__ = decoder_config.pop('''model_type''') from ..auto.configuration_auto import AutoConfig A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = True @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''') A__ = True A__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.encoder.to_dict() A__ = self.decoder.to_dict() A__ = self.__class__.model_type return output
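A hedged sketch of building the config class above through the standard transformers entry point (BERT defaults on both sides are an arbitrary choice, not mandated by the class):

from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.to_dict()["model_type"])  # "encoder-decoder"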
14
0
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def snake_case_ ( A_ : str = "isbn/0140328726" ): '''simple docstring''' _lowerCamelCase : List[str] = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: _lowerCamelCase : Tuple = F'''{olid} is not a valid Open Library olid''' raise ValueError(A_ ) return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json() def snake_case_ ( A_ : dict ): '''simple docstring''' _lowerCamelCase : List[str] = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } _lowerCamelCase : Union[str, Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} _lowerCamelCase : Dict = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] _lowerCamelCase : List[str] = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(A_, A_ ): _lowerCamelCase : Any = ''', '''.join(A_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""") continue print(F"""\nSearching Open Library for ISBN: {isbn}...\n""") try: lowerCAmelCase__ = summarize_book(get_openlibrary_data(F"""isbn/{isbn}""")) print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F"""Sorry, there are no results for ISBN: {isbn}.""")
72
def longest_distance(graph: dict) -> None:
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
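For the adjacency list above the call prints 5, the vertex count of the longest path 0 -> 2 -> 5 -> 6 -> 7. A second, self-contained example:

longest_distance({0: [1], 1: [2], 2: []})  # prints 3: the chain 0 -> 1 -> 2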
14
0
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
K = 1
while K:
    user_number = int(input("""enter the number and , and see the magic : """))
    print()
    pretty_print(user_number)
    K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
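Sample output of the program above (trailing spaces omitted):

pretty_print(3)
# Prints:
#   *
#  * *
# * * *
# * * *
#  * *
#   *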
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
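# A usage sketch of this builder through the public `datasets` API. The file
# names here are placeholders: `data.jsonl` stands for a newline-delimited
# JSON file, and `field="data"` maps to the JsonConfig `field` option above
# for files shaped like {"data": [...]}.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")
nested = load_dataset("json", data_files="nested.json", field="data", split="train")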
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = PegasusTokenizer _lowerCamelCase: Tuple = PegasusTokenizerFast _lowerCamelCase: List[Any] = True _lowerCamelCase: List[Any] = True def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : int ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict ) -> str: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = '</s>' A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<pad>' ) self.assertEqual(vocab_keys[1] ,'</s>' ) self.assertEqual(vocab_keys[-1] ,'v' ) self.assertEqual(len(A_ ) ,1103 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size ,1103 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: A = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word A = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' A = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: A = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 A = 'To ensure a smooth flow of bank resolutions.' 
A = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = ['This is going to be way too long.' * 150, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: # fmt: off A = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A_ ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,) @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = PegasusTokenizer _lowerCamelCase: Dict = PegasusTokenizerFast _lowerCamelCase: Tuple = True _lowerCamelCase: Any = True def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing A = PegasusTokenizer(A_ ,offset=0 ,mask_token_sent=A_ ,mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def 
_SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : List[Any] ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ) -> int: return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A = self.tokenizer_class.from_pretrained(self.tmpdirname ) A = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0] self.assertListEqual(A_ ,A_ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = ['This is going to be way too long.' * 1000, 'short example'] A = ['not super long but more than 5 tokens', 'tiny'] A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) A = self._large_tokenizer( text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(A_ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) A = self._large_tokenizer(A_ ).input_ids self.assertListEqual( A_ ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
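# A usage sketch of the tokenizer exercised above via the public API
# (downloads the pretrained vocab; the exact ids depend on the checkpoint):
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = tokenizer("To ensure a smooth flow of bank resolutions.").input_ids
print(ids)  # ends with the EOS id 1, matching the expected list in the test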
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch _lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random""" _lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random""" @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' return AutoConfig.from_pretrained(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.num_hidden_layers , 1) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , 1) def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' with self.assertRaises(UpperCAmelCase__): create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
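# A minimal, hypothetical sketch of the layer-selection idea that
# create_student_by_copying_alternating_layers tests above: spread the
# student's picks across the teacher stack (the real make_student script
# also rewires the config and handles encoder/decoder depths separately).
import torch.nn as nn


def pick_student_layers(teacher_layers: nn.ModuleList, n_student: int) -> nn.ModuleList:
    n_teacher = len(teacher_layers)
    if n_student == 1:
        indices = [0]
    else:
        step = (n_teacher - 1) / (n_student - 1)
        indices = [round(i * step) for i in range(n_student)]  # e.g. 12 -> 6: [0, 2, 4, 7, 9, 11]
    return nn.ModuleList(teacher_layers[i] for i in indices)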
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : Optional[Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = ["""YolosFeatureExtractor"""] a_ : Optional[int] = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
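# Consumer-side sketch of the lazy API above: the torch-backed classes are
# only imported on first attribute access ("hustvl/yolos-tiny" is a public
# checkpoint; loading it needs network access).
from transformers import YolosForObjectDetection, YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
print(model.config.num_labels)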
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
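# A usage sketch of the processor outside the test harness, reusing the COCO
# fixture path from the slow tests above; the defaults resize to shortest
# edge 800 with the longest edge capped at 1333.
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeformableDetrImageProcessor()
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) for this image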
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs anywhere in `text` (Knuth-Morris-Pratt)."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculate the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
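# Reading the failure array: failure[k] is the length of the longest proper
# prefix of pattern[: k + 1] that is also its suffix; on a mismatch after
# matching k + 1 characters, the search can resume at that index instead of
# restarting from zero. A small runnable check:
assert get_failure_array("aabaab") == [0, 1, 0, 1, 2, 3]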
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between the endpoints of two vectors, using NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between the endpoints of two vectors, in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark the pure-Python and NumPy implementations against each other."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
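# Quick agreement check for the two implementations above on a 3-4-5
# triangle; both should return exactly 5.0.
assert euclidean_distance([0, 0], [3, 4]) == 5.0
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0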
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , a=True , a=1 / 2_5_5 , a=True , ) -> Any: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase__ : Dict = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} lowercase__ : Optional[int] = parent lowercase__ : Tuple = batch_size lowercase__ : List[str] = num_channels lowercase__ : List[str] = min_resolution lowercase__ : Tuple = max_resolution lowercase__ : Union[str, Any] = do_resize lowercase__ : Dict = size lowercase__ : str = do_normalize lowercase__ : List[Any] = image_mean lowercase__ : int = image_std lowercase__ : List[Any] = do_rescale lowercase__ : int = rescale_factor lowercase__ : int = do_pad def _UpperCAmelCase ( self ) -> Optional[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _UpperCAmelCase ( self , a , a=False ) -> Dict: if not batched: lowercase__ : str = image_inputs[0] if isinstance(a , Image.Image ): lowercase__ , lowercase__ : List[str] = image.size else: lowercase__ , lowercase__ : int = image.shape[1], image.shape[2] if w < h: lowercase__ : Any = int(self.size['shortest_edge'] * h / w ) lowercase__ : Dict = self.size['shortest_edge'] elif w > h: lowercase__ : int = self.size['shortest_edge'] lowercase__ : Tuple = int(self.size['shortest_edge'] * w / h ) else: lowercase__ : Optional[Any] = self.size['shortest_edge'] lowercase__ : List[Any] = self.size['shortest_edge'] else: lowercase__ : Union[str, Any] = [] for image in image_inputs: lowercase__ , lowercase__ : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__ : Optional[Any] = max(a , key=lambda a : item[0] )[0] lowercase__ : Optional[Any] = max(a , key=lambda a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : List[str] = DetaImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[Any] = DetaImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'do_rescale' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'size' ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowercase__ , lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(a , batched=a ) lowercase__ : List[str] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowercase__ , lowercase__ : List[Any] = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : Union[str, Any] = image_processing(a , return_tensors='pt' ).pixel_values lowercase__ , lowercase__ : int = self.image_processor_tester.get_expected_values(a , batched=a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : Optional[int] = image_processing(a , return_tensors='pt' ).pixel_values lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _UpperCAmelCase ( self ) -> Dict: # prepare image and target lowercase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with 
open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: lowercase__ : Tuple = json.loads(f.read() ) lowercase__ : Optional[int] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them lowercase__ : Union[str, Any] = DetaImageProcessor() lowercase__ : List[str] = image_processing(images=a , annotations=a , return_tensors='pt' ) # verify pixel values lowercase__ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['pixel_values'].shape , a ) lowercase__ : List[str] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) ) # verify area lowercase__ : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) ) # verify boxes lowercase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , a ) lowercase__ : str = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) ) # verify image_id lowercase__ : Tuple = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) ) # verify is_crowd lowercase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) ) # verify class_labels lowercase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) ) # verify orig_size lowercase__ : Tuple = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) ) # verify size lowercase__ : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: # prepare image, target and masks_path lowercase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: lowercase__ : int = json.loads(f.read() ) lowercase__ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} lowercase__ : Any = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them lowercase__ : List[Any] = DetaImageProcessor(format='coco_panoptic' ) lowercase__ : int = image_processing(images=a , annotations=a , masks_path=a , return_tensors='pt' ) # verify pixel values lowercase__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['pixel_values'].shape , a ) lowercase__ : Optional[int] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a , atol=1e-4 ) ) # verify area lowercase__ : List[str] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a ) ) # verify boxes lowercase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , a ) lowercase__ : List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a , atol=1e-3 ) ) # verify image_id lowercase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a ) ) # verify is_crowd lowercase__ : Union[str, 
Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a ) ) # verify class_labels lowercase__ : Tuple = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a ) ) # verify masks lowercase__ : Dict = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a ) # verify orig_size lowercase__ : Any = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a ) ) # verify size lowercase__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a ) )
from ...processing_utils import ProcessorMixin class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''SpeechT5FeatureExtractor''' UpperCAmelCase__ = '''SpeechT5Tokenizer''' def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__) def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]: '''simple docstring''' A__ = kwargs.pop('''audio''' , UpperCAmelCase__) A__ = kwargs.pop('''text''' , UpperCAmelCase__) A__ = kwargs.pop('''text_target''' , UpperCAmelCase__) A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__) A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) elif text is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if audio_target is not None: A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_values'''] elif text_target is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' A__ = kwargs.pop('''input_values''' , UpperCAmelCase__) A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__) A__ = kwargs.pop('''labels''' , UpperCAmelCase__) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) elif input_ids is not None: A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if labels is not None: if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]): A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = self.feature_extractor.feature_size A__ = self.feature_extractor.num_mel_bins A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) A__ = feature_size_hack A__ = targets['''input_values'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if 
decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
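# A usage sketch of the processor via the public API ("microsoft/speecht5_tts"
# is the canonical pretrained checkpoint; downloading it needs network access):
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
print(inputs["input_ids"].shape)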
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : str = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git_vision_model''' def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_channels A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''') == "git": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git''' def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''') A__ = GitVisionConfig(**UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = tie_word_embeddings A__ = num_image_with_embedding A__ = bos_token_id A__ = eos_token_id def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
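# A usage sketch of the config pair above through the public classes; the
# checkpoint name comes from the pretrained-config archive map at the top.
from transformers import GitConfig, GitForCausalLM

config = GitConfig.from_pretrained("microsoft/git-base")
model = GitForCausalLM(config)  # randomly initialised weights, config-only download
print(config.vision_config.hidden_size)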
def is_even(number: int) -> bool:
    """Return True if `number` is even, checking the lowest bit.

    >>> is_even(0)
    True
    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
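# The bitwise test works because the lowest bit carries the 2**0 place, so
# `number & 1` isolates it; even numbers have it clear. Python's & uses
# two's-complement semantics for negative ints, so the check also holds
# below zero:
assert is_even(-4) and not is_even(-3)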
import requests
from bs4 import BeautifulSoup  # the package name is bs4, not bsa


def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by N' text scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
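# A more defensive variant (a sketch, not part of the original script):
# anchors[2] assumes the "Cited by" link is always the third anchor in the
# gs_fl row, which Google Scholar does not guarantee, and automated requests
# may be rate-limited or blocked. Selecting by link text is sturdier:
def get_citation_defensive(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    for anchor in soup.select("div.gs_ri div.gs_fl a"):
        if anchor.get_text().startswith("Cited by"):
            return anchor.get_text()
    raise LookupError("No 'Cited by' link found; the page layout may have changed")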
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a__ : List[str] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowercase_ ( a__ ): __UpperCAmelCase = ['pixel_values'] def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ): super().__init__(**a ) UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24} UpperCamelCase__ = get_size_dict(a , default_to_square=a ) UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" ) UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = resample UpperCamelCase__ = do_center_crop UpperCamelCase__ = crop_size UpperCamelCase__ = do_rescale UpperCamelCase__ = rescale_factor UpperCamelCase__ = do_normalize UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD UpperCamelCase__ = do_convert_rgb def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ): UpperCamelCase__ = get_size_dict(a , default_to_square=a ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a ) return resize(a , size=a , resample=a , data_format=a , **a ) def __a ( self , a , a , a = None , **a , ): UpperCamelCase__ = get_size_dict(a ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a ) def __a ( self , a , a , a = None , **a , ): return rescale(a , scale=a , data_format=a , **a ) def __a ( self , a , a , a , a = None , **a , ): return normalize(a , mean=a , std=a , data_format=a , **a ) def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ): UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ = size if size is not None else self.size UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a ) UpperCamelCase__ = resample if resample is not None else self.resample UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a ) UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean UpperCamelCase__ = image_std if image_std is not None else self.image_std UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCamelCase__ = make_list_of_images(a ) if not valid_images(a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCamelCase__ = [convert_to_rgb(a ) for image in images] # All transformations expect numpy arrays. UpperCamelCase__ = [to_numpy_array(a ) for image in images] if do_resize: UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images] if do_center_crop: UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images] if do_rescale: UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images] if do_normalize: UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images] UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images] UpperCamelCase__ = {"pixel_values": images} return BatchFeature(data=a , tensor_type=a )
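# A usage sketch with the equivalent public class (this module mirrors
# transformers' CLIPImageProcessor); the checkpoint name is the standard
# CLIP one, and the input image is a synthetic stand-in.
from PIL import Image
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (640, 480))
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])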
80
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") args = parser.parse_args() base_model_path = args.base_model_path checkpoint_path = args.checkpoint_path dump_path = args.dump_path LORA_PREFIX_UNET = args.lora_prefix_unet LORA_PREFIX_TEXT_ENCODER = args.lora_prefix_text_encoder alpha = args.alpha pipe = SCREAMING_SNAKE_CASE(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha) pipe = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
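# Hedged example invocation of the merge script above (the filename and paths are
# hypothetical; the flags match the argparse definitions):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75 --device cpu
#
# Each matched layer is updated in place as W = W0 + alpha * (up @ down) before
# the merged pipeline is saved.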
14
0
"""simple docstring""" import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def _A ( lowercase , lowercase , lowercase=[] ): """simple docstring""" a =size[0] - overlap_pixels * 2 a =size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels a =np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55 a =np.pad(lowercase , mode='''linear_ramp''' , pad_width=lowercase , end_values=0 ) if "l" in remove_borders: a =mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: a =mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: a =mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: a =mask[0 : mask.shape[0] - overlap_pixels, :] return mask def _A ( lowercase , lowercase , lowercase ): """simple docstring""" return max(lowercase , min(lowercase , lowercase ) ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =list(lowercase ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap a =clamp_rect(lowercase , [0, 0] , [image_size[0], image_size[1]] ) return rect def _A ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" a =Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(lowercase , (original_slice, 0) ) return result def _A ( lowercase , lowercase ): """simple docstring""" a =(original_image_slice * 4, 0, tile.size[0], tile.size[1]) a =tile.crop(lowercase ) return tile def _A ( lowercase , lowercase ): """simple docstring""" a =n % d return n - divisor class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ) -> int: super().__init__( vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A , **__A ) -> Tuple: torch.manual_seed(0 ) a =( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) a =add_overlap_rect(__A , __A , image.size ) a =image.crop(__A ) a =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] a =translated_slice_x - (original_image_slice / 2) a =max(0 , __A ) a =squeeze_tile(__A , __A , __A , __A ) a =to_input.size a =to_input.resize((tile_size, tile_size) , Image.BICUBIC ) a =super(__A , self ).__call__(image=__A , **__A ).images[0] a =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) a 
=unsqueeze_tile(__A , __A ) a =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) a =[] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) a =Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode='''L''' , ) final_image.paste( __A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A ) @torch.no_grad() def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ) -> Optional[int]: a =Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) a =math.ceil(image.size[0] / tile_size ) a =math.ceil(image.size[1] / tile_size ) a =tcx * tcy a =0 for y in range(__A ): for x in range(__A ): self._process_tile( __A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , ) current_count += 1 if callback is not None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def _A ( ): """simple docstring""" # Run a demo a ='''stabilityai/stable-diffusion-x4-upscaler''' a =StableDiffusionTiledUpscalePipeline.from_pretrained(lowercase , revision='''fp16''' , torch_dtype=torch.floataa ) a =pipe.to('''cuda''' ) a =Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def callback(lowercase ): print(f'''progress: {obj["progress"]:.4f}''' ) obj["image"].save('''diffusers_library_progress.jpg''' ) a =pipe(image=lowercase , prompt='''Black font, white background, vector''' , noise_level=40 , callback=lowercase ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
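# Hedged sketch of the tile geometry above: expanding a tile's crop rect by
# `tile_border` and clamping it back to the image, as `add_overlap_rect`/`clamp_rect`
# do. The numbers below assume a 256x256 image, tile_size=128, tile_border=32.
def _demo_overlap_rect(rect, overlap, image_size):
    def _clamp(x, lo, hi):
        return max(lo, min(x, hi))

    grown = (rect[0] - overlap, rect[1] - overlap, rect[2] + overlap, rect[3] + overlap)
    return (
        _clamp(grown[0], 0, image_size[0]),
        _clamp(grown[1], 0, image_size[1]),
        _clamp(grown[2], 0, image_size[0]),
        _clamp(grown[3], 0, image_size[1]),
    )

print(_demo_overlap_rect((128, 0, 256, 128), 32, (256, 256)))  # (96, 0, 256, 160)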
81
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
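# Hedged illustration of the behaviour the tests above pin down: `get_imports`
# collects top-level imports but drops ones guarded by try/except ImportError,
# so every snippet parses to just ["os"]. For example (path hypothetical):
#
#   >>> from transformers.dynamic_module_utils import get_imports
#   >>> with open("/tmp/guarded.py", "w") as f:
#   ...     f.write("import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n")
#   >>> get_imports("/tmp/guarded.py")
#   ['os']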
14
0
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): __lowerCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowerCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = TextaTextGenerationPipeline(model=_snake_case , tokenizer=_snake_case ) return generator, ["Something to write", "Something else"] def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = generator("""Something there""" ) self.assertEqual(_snake_case , [{"""generated_text""": ANY(_snake_case )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) _lowerCAmelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_snake_case ) self.assertEqual( _snake_case , [ [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], ] , ) _lowerCAmelCase = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case ) self.assertEqual( _snake_case , [ [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], [{"""generated_text""": ANY(_snake_case )}, {"""generated_text""": ANY(_snake_case )}], ] , ) with self.assertRaises(_snake_case ): generator(4 ) @require_torch def snake_case ( self ): """simple docstring""" _lowerCAmelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility _lowerCAmelCase = generator("""Something there""" , do_sample=_snake_case ) self.assertEqual(_snake_case , [{"""generated_text""": """"""}] ) _lowerCAmelCase = 3 _lowerCAmelCase = generator( """Something there""" , num_return_sequences=_snake_case , num_beams=_snake_case , ) _lowerCAmelCase = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_snake_case , _snake_case ) _lowerCAmelCase = generator("""This is a test""" , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case ) self.assertEqual( _snake_case , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) _lowerCAmelCase = generator.model.config.eos_token_id _lowerCAmelCase = """<pad>""" _lowerCAmelCase = generator( ["""This is a test""", """This is a second test"""] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , ) self.assertEqual( _snake_case , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def snake_case ( self ): """simple docstring""" _lowerCAmelCase = 
pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility _lowerCAmelCase = generator("""Something there""" , do_sample=_snake_case ) self.assertEqual(_snake_case , [{"""generated_text""": """"""}] )
82
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
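# Sanity sketch: NOR is `not (a or b)`; the arithmetic form used by nor_gate
# above agrees with the boolean form on all four input pairs.
for a in (0, 1):
    for b in (0, 1):
        assert nor_gate(a, b) == int(not (a or b))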
14
0
'''simple docstring'''


def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
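# Sanity sketch: the three implementations above agree, including on negatives,
# since each takes the absolute value first.
for n in (0, 7, 262144, -9045):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
print(sum_of_digits(262144))  # 2 + 6 + 2 + 1 + 4 + 4 = 19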
83
import os import sys import unittest _lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = {'''BertModelTest''': '''BertModelTester'''} A__ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': 
['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
14
0
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Dict = VideoToVideoSDPipeline UpperCAmelCase_ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} UpperCAmelCase_ :int = PipelineTesterMixin.required_optional_params - {"latents"} UpperCAmelCase_ :Union[str, Any] = False # No `output_type`. UpperCAmelCase_ :Optional[Any] = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :Any = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) lowerCAmelCase_ :int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCAmelCase_ :Any = CLIPTextModel(__A ) lowerCAmelCase_ :Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> Dict: # 3 frames lowerCAmelCase_ :str = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :List[str] = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :List[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase_ 
:Optional[Any] = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = VideoToVideoSDPipeline(**__A ) lowerCAmelCase_ :List[Any] = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :int = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = """np""" lowerCAmelCase_ :List[str] = sd_pipe(**__A ).frames lowerCAmelCase_ :Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) lowerCAmelCase_ :Union[str, Any] = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__A , expected_max_diff=5E-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def __lowerCAmelCase ( self ) -> List[str]: pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def __lowerCAmelCase ( self ) -> Any: pass def __lowerCAmelCase ( self ) -> str: return super().test_progress_bar() @slow @skip_mps class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :str = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames lowerCAmelCase_ :int = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = torch.randn((1, 10, 3, 1024, 576) , generator=__A ) lowerCAmelCase_ :Optional[int] = video.to("""cuda""" ) lowerCAmelCase_ :Tuple = """Spiderman is surfing""" lowerCAmelCase_ :Union[str, Any] = pipe(__A , video=__A , generator=__A , num_inference_steps=3 , output_type="""pt""" ).frames lowerCAmelCase_ :str = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
84
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
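# Hedged inference sketch, condensed from the integration test above (checkpoint
# name and logits shape come from the test; the image path is hypothetical):
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

ef_model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
ef_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
ef_inputs = ef_processor(images=Image.open("cat.png"), return_tensors="tf")
print(ef_model(**ef_inputs, training=False).logits.shape)  # (1, 1000)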
14
0
'''simple docstring'''

from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        # Increment step
        xa = xb
        fxa = fxb
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
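# Sanity sketch: for the straight line f(x) = x, the arc length from 0 to 1 is
# sqrt(2), which the piecewise-linear sum above recovers for any step count.
print(line_length(lambda x: x, 0, 1, 10))  # ~1.4142135623730951 == math.sqrt(2)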
85
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: sort items by value/weight ratio, take whole
    items while they fit, then a fraction of the next one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
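# Usage sketch with the classic example: items of value [60, 100, 120] and weight
# [10, 20, 30] under capacity 50 -> take items 0 and 1 whole and 2/3 of item 2.
print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))
# (240.0, [1, 1, 0.6666666666666666])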
14
0
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class A__ ( _lowerCamelCase): A_ : jnp.ndarray @flax_register_to_config class A__ ( nn.Module , _lowerCamelCase , _lowerCamelCase): A_ : int = 3_2 A_ : int = 4 A_ : int = 4 A_ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A_ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") A_ : Union[bool, Tuple[bool]] = False A_ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) A_ : int = 2 A_ : Union[int, Tuple[int]] = 8 A_ : Optional[Union[int, Tuple[int]]] = None A_ : int = 1_2_8_0 A_ : float = 0.0 A_ : bool = False A_ : jnp.dtype = jnp.floataa A_ : bool = True A_ : int = 0 A_ : bool = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): # init input tensors __lowerCAmelCase : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size) __lowerCAmelCase : int = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : str = jnp.ones((1,) , dtype=jnp.intaa ) __lowerCAmelCase : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __lowerCAmelCase , __lowerCAmelCase : List[str] = jax.random.split(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = {'params': params_rng, 'dropout': dropout_rng} return self.init(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["params"] def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.block_out_channels __lowerCAmelCase : List[str] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
__lowerCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim # input __lowerCAmelCase : Union[str, Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __lowerCAmelCase : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __lowerCAmelCase : Optional[int] = FlaxTimestepEmbedding(_SCREAMING_SNAKE_CASE , dtype=self.dtype ) __lowerCAmelCase : Optional[Any] = self.only_cross_attention if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = (num_attention_heads,) * len(self.down_block_types ) # down __lowerCAmelCase : str = [] __lowerCAmelCase : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __lowerCAmelCase : Optional[Any] = output_channel __lowerCAmelCase : Dict = block_out_channels[i] __lowerCAmelCase : Optional[int] = i == len(_SCREAMING_SNAKE_CASE ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowerCAmelCase : Any = FlaxCrossAttnDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __lowerCAmelCase : List[Any] = FlaxDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = down_blocks # mid __lowerCAmelCase : List[str] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up __lowerCAmelCase : Union[str, Any] = [] __lowerCAmelCase : int = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Dict = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Any = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Optional[int] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __lowerCAmelCase : Optional[int] = output_channel __lowerCAmelCase : Any = reversed_block_out_channels[i] __lowerCAmelCase : Optional[Any] = reversed_block_out_channels[min(i + 1 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __lowerCAmelCase : Optional[int] = i == len(_SCREAMING_SNAKE_CASE ) - 1 if up_block_type == "CrossAttnUpBlock2D": __lowerCAmelCase : Union[str, Any] = FlaxCrossAttnUpBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: 
__lowerCAmelCase : Tuple = FlaxUpBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = output_channel __lowerCAmelCase : int = up_blocks # out __lowerCAmelCase : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCAmelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , ): # 1. time if not isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ): __lowerCAmelCase : Optional[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0: __lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa ) __lowerCAmelCase : List[str] = jnp.expand_dims(_SCREAMING_SNAKE_CASE , 0 ) __lowerCAmelCase : Dict = self.time_proj(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = self.time_embedding(_SCREAMING_SNAKE_CASE ) # 2. pre-process __lowerCAmelCase : Union[str, Any] = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) ) __lowerCAmelCase : Union[str, Any] = self.conv_in(_SCREAMING_SNAKE_CASE ) # 3. down __lowerCAmelCase : str = (sample,) for down_block in self.down_blocks: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) else: __lowerCAmelCase , __lowerCAmelCase : Tuple = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __lowerCAmelCase : str = () for down_block_res_sample, down_block_additional_residual in zip( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __lowerCAmelCase : Any = new_down_block_res_samples # 4. mid __lowerCAmelCase : Union[str, Any] = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __lowerCAmelCase : Dict = down_block_res_samples[-(self.layers_per_block + 1) :] __lowerCAmelCase : str = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = up_block( _SCREAMING_SNAKE_CASE , temb=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , res_hidden_states_tuple=_SCREAMING_SNAKE_CASE , deterministic=not train , ) else: __lowerCAmelCase : Dict = up_block(_SCREAMING_SNAKE_CASE , temb=_SCREAMING_SNAKE_CASE , res_hidden_states_tuple=_SCREAMING_SNAKE_CASE , deterministic=not train ) # 6. 
post-process __lowerCAmelCase : str = self.conv_norm_out(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = nn.silu(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.conv_out(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_SCREAMING_SNAKE_CASE )
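# Hedged sketch of the dummy inputs this Flax UNet initializes its weights with
# (shapes mirror the init helper above and the class defaults: in_channels=4,
# sample_size=32, cross_attention_dim=1280):
import jax.numpy as jnp

unet_sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)          # (batch, channels, H, W)
unet_timesteps = jnp.ones((1,), dtype=jnp.int32)                    # one timestep per sample
unet_context = jnp.zeros((1, 1, 1280), dtype=jnp.float32)           # (batch, seq, cross_attention_dim)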
86
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
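# Hedged example invocation of the evaluation script above (the filename is
# hypothetical; the flags match the argparse definitions):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test \
#       --log_outputs
#
# WER/CER land in <dataset>_<config>_<split>_eval_results.txt, with optional
# per-example prediction/target logs when --log_outputs is set.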
14
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __lowerCAmelCase : Any = get_logger(__name__) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Optional[str] = None ) -> Optional[Any]: """simple docstring""" __magic_name__ = ( os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __magic_name__ = Extractor def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> str: """simple docstring""" from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __magic_name__ = os.path.abspath(UpperCamelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> bool: """simple docstring""" return force_extract or ( not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ )) ) def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> str: """simple docstring""" __magic_name__ = self.extractor.infer_extractor_format(UpperCamelCase__ ) if not extractor_format: return input_path __magic_name__ = self._get_output_path(UpperCamelCase__ ) if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ): self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return output_path class UpperCAmelCase_ ( _A ): '''simple docstring''' @classmethod @abstractmethod def _lowercase ( cls : List[str] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : Union[str, Any] ) -> bool: """simple docstring""" ... @staticmethod @abstractmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" ... 
class UpperCAmelCase_ ( _A , _A ): '''simple docstring''' a__ = [] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> List[str]: """simple docstring""" with open(UpperCamelCase__ , """rb""" ) as f: return f.read(UpperCamelCase__ ) @classmethod def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool: """simple docstring""" if not magic_number: __magic_name__ = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) try: __magic_name__ = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) except OSError: return False return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase_ ( _A ): '''simple docstring''' @classmethod def _lowercase ( cls : Optional[Any] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ) -> bool: """simple docstring""" return tarfile.is_tarfile(UpperCamelCase__ ) @staticmethod def _lowercase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" def resolved(UpperCamelCase__ : str ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase__ ) ) def badpath(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ ) def badlink(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> bool: # Links are interpreted relative to the directory containing the link __magic_name__ = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase__ ) __magic_name__ = resolved(UpperCamelCase__ ) for finfo in members: if badpath(finfo.name , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) __magic_name__ = tarfile.open(UpperCamelCase__ ) tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) ) tar_file.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x1F\x8B"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool: """simple docstring""" if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses 
executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase__ , """rb""" ) as fp: __magic_name__ = _EndRecData(UpperCamelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __magic_name__ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be if len(UpperCamelCase__ ) == sizeCentralDir: __magic_name__ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file: zip_file.extractall(UpperCamelCase__ ) zip_file.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with lzma.open(UpperCamelCase__ ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) __magic_name__ = rarfile.RarFile(UpperCamelCase__ ) rf.extractall(UpperCamelCase__ ) rf.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __magic_name__ = zstd.ZstdDecompressor() with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh: dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x42\x5A\x68"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with bza.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: 
"""simple docstring""" if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with pyazr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive: archive.extractall(UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x04\x22\x4D\x18"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ : '''simple docstring''' a__ = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowercase ( cls : Tuple ) -> Tuple: """simple docstring""" return max( len(UpperCamelCase__ ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase__ , UpperCamelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> Union[str, Any]: """simple docstring""" try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ ) except OSError: return b"" @classmethod def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ) -> bool: """simple docstring""" warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , ) __magic_name__ = cls.infer_extractor_format(UpperCamelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowercase ( cls : Dict , UpperCamelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/> """simple docstring""" __magic_name__ = cls._get_magic_number_max_length() __magic_name__ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return extractor_format @classmethod def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None: """simple docstring""" os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ ) # Prevent parallel extractions __magic_name__ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) ) with FileLock(UpperCamelCase__ ): shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
""" """Use 'extractor_format' instead.""" , category=UpperCamelCase__ , ) __magic_name__ = extractor if extractor != """deprecated""" else extractor_format else: __magic_name__ = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase__ , UpperCamelCase__ ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=UpperCamelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase__ ): return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
88
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
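This `__init__` is the smallest instance in this dump of the `_LazyModule` pattern: submodules are imported only when one of their exported names is first accessed. The gist can be illustrated with PEP 562 module-level `__getattr__`; this is a simplified stand-in, not the real `_LazyModule` implementation:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # Resolve the owning module lazily, on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

print(__getattr__("sqrt")(16.0))  # 4.0 -- the import happens here, not at load time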
14
0
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger('''transformers.models.encodec''') __lowerCAmelCase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } __lowerCAmelCase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } __lowerCAmelCase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } __lowerCAmelCase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } __lowerCAmelCase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } __lowerCAmelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __lowerCAmelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __lowerCAmelCase = [] __lowerCAmelCase = [] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: for attribute in key.split('.' 
): _a : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: _a : int = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: _a : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : Any = value elif weight_type == "weight_g": _a : Tuple = value elif weight_type == "weight_v": _a : Tuple = value elif weight_type == "bias": _a : Union[str, Any] = value elif weight_type == "running_mean": _a : int = value elif weight_type == "running_var": _a : Union[str, Any] = value elif weight_type == "num_batches_tracked": _a : Tuple = value elif weight_type == "weight_ih_l0": _a : Tuple = value elif weight_type == "weight_hh_l0": _a : int = value elif weight_type == "bias_ih_l0": _a : str = value elif weight_type == "bias_hh_l0": _a : Optional[int] = value elif weight_type == "weight_ih_l1": _a : Any = value elif weight_type == "weight_hh_l1": _a : Optional[Any] = value elif weight_type == "bias_ih_l1": _a : Any = value elif weight_type == "bias_hh_l1": _a : str = value else: _a : Optional[int] = value logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a : Union[str, Any] = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _a : Union[str, Any] = [] if model_name in ("encodec_24khz", "encodec_32khz"): _a : Tuple = MAPPING_24K elif model_name == "encodec_48khz": _a : str = MAPPING_48K else: raise ValueError(f"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(lowerCAmelCase_ , lowerCAmelCase_ ): logger.info(f"""{name} was ignored""" ) continue _a : List[Any] = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a : Optional[Any] = key.split('.*.' ) if prefix in name and suffix in name: _a : Tuple = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue _a : Union[str, Any] = True if "*" in mapped_key: _a : List[Any] = name.split(lowerCAmelCase_ )[0].split('.'
)[-2] _a : Any = mapped_key.replace('*' , lowerCAmelCase_ ) if "weight_g" in name: _a : str = 'weight_g' elif "weight_v" in name: _a : Optional[int] = 'weight_v' elif "weight_ih_l0" in name: _a : List[str] = 'weight_ih_l0' elif "weight_hh_l0" in name: _a : Dict = 'weight_hh_l0' elif "bias_ih_l0" in name: _a : Tuple = 'bias_ih_l0' elif "bias_hh_l0" in name: _a : Optional[Any] = 'bias_hh_l0' elif "weight_ih_l1" in name: _a : Dict = 'weight_ih_l1' elif "weight_hh_l1" in name: _a : Dict = 'weight_hh_l1' elif "bias_ih_l1" in name: _a : Optional[Any] = 'bias_ih_l1' elif "bias_hh_l1" in name: _a : Optional[Any] = 'bias_hh_l1' elif "bias" in name: _a : Tuple = 'bias' elif "weight" in name: _a : List[str] = 'weight' elif "running_mean" in name: _a : Optional[Any] = 'running_mean' elif "running_var" in name: _a : int = 'running_var' elif "num_batches_tracked" in name: _a : List[Any] = 'num_batches_tracked' else: _a : List[str] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Union[str, Any]: if config_path is not None: _a : Optional[Any] = EncodecConfig.from_pretrained(lowerCAmelCase_ ) else: _a : int = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a : Dict = [8, 5, 4, 4] _a : Optional[int] = [2.2] _a : Optional[Any] = 64 _a : Any = 32000 _a : Dict = 2048 _a : Dict = False _a : List[Any] = False _a : List[Any] = False elif model_name == "encodec_48khz": _a : Optional[int] = [8, 5, 4, 2] _a : Optional[Any] = [3.0, 6.0, 12.0, 24.0] _a : str = 48000 _a : Optional[int] = 2 _a : List[str] = False _a : List[Any] = 'time_group_norm' _a : Optional[int] = True _a : str = 1.0 _a : List[Any] = 0.01 else: raise ValueError(f"""Unknown model name: {model_name}""" ) _a : Optional[int] = EncodecModel(lowerCAmelCase_ ) _a : List[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCAmelCase_ ) _a : Tuple = torch.load(lowerCAmelCase_ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a : Union[str, Any] = original_checkpoint['best_state'] recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(lowerCAmelCase_ ) model.push_to_hub(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. 
Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __lowerCAmelCase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
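The conversion above renames original EnCodec state-dict keys through mapping tables whose keys may contain a '.*.' wildcard standing for the layer index. A deliberately simplified toy version of that renaming step (the real logic above also handles weight-type suffixes and ignore patterns):

from typing import Optional

def rename_key(name: str, mapping: dict) -> Optional[str]:
    for key, mapped_key in mapping.items():
        if "*" in key:
            prefix, suffix = key.split(".*.")
            if name.startswith(prefix) and name.endswith(suffix):
                # The segment matched by '.*.' is spliced into the '*' of the target.
                layer = name[len(prefix) + 1 :].split(".")[0]
                return mapped_key.replace("*", layer)
        elif key in name:
            return name.replace(key, mapped_key)
    return None

print(rename_key(
    "quantizer.vq.layers.3._codebook.embed",
    {"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed"},
))  # quantizer.layers.3.codebook.embed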
89
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
0
from math import sqrt def lowerCamelCase_ ( UpperCamelCase__ : int ) -> bool: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" __lowerCamelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCamelCase = False for divisor in range(2 , int(round(sqrt(UpperCamelCase__ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCamelCase = False break # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'status' must been from type bool" return status def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Tuple: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCamelCase = list(range(2 , n + 1 ) ) __lowerCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(UpperCamelCase__ ) ): for j in range(i + 1 , len(UpperCamelCase__ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCamelCase = 0 # filters actual prime numbers. __lowerCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[str]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n > 2), "'N' must been an int and > 2" __lowerCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(UpperCamelCase__ ): ans.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number >= 0, "'number' must been an int and >= 0" __lowerCamelCase = [] # this list will be returns of the function. # potential prime number factors. 
__lowerCamelCase = 2 __lowerCamelCase = number if number == 0 or number == 1: ans.append(UpperCamelCase__ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(UpperCamelCase__ ): while quotient != 1: if is_prime(UpperCamelCase__ ) and (quotient % factor == 0): ans.append(UpperCamelCase__ ) quotient /= factor else: factor += 1 else: ans.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type list" return ans def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCamelCase = 0 # prime factorization of 'number' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = max(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int" return ans def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCamelCase = 0 # prime factorization of 'number' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = min(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'ans' must been from type int" return ans def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 == 0 , UpperCamelCase__ ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase_ ( UpperCamelCase__ : str ) -> str: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "'number' must been an int" assert isinstance(number % 2 != 0 , UpperCamelCase__ ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (number > 2) and is_even(UpperCamelCase__ ) ), "'number' must been an int, even and > 2" __lowerCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCamelCase = get_prime_numbers(UpperCamelCase__ ) __lowerCamelCase = len(UpperCamelCase__ ) # run variable for while-loops. __lowerCamelCase = 0 __lowerCamelCase = None # exit variable. for break up the loops __lowerCamelCase = True while i < len_pn and loop: __lowerCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCamelCase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (len(UpperCamelCase__ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> Optional[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
__lowerCamelCase = 0 while numbera != 0: __lowerCamelCase = numbera % numbera __lowerCamelCase = numbera __lowerCamelCase = rest # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> int: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __lowerCamelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCamelCase = prime_factorization(UpperCamelCase__ ) __lowerCamelCase = prime_factorization(UpperCamelCase__ ) elif numbera == 1 or numbera == 1: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = max(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(max(UpperCamelCase__ , UpperCamelCase__ ) ): ans *= n else: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ): ans *= n done.append(UpperCamelCase__ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCamelCase = prime_fac_a.count(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ): ans *= n done.append(UpperCamelCase__ ) # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> str: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'number' must been a positive int" __lowerCamelCase = 0 __lowerCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(UpperCamelCase__ ): ans += 1 # precondition assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and is_prime( UpperCamelCase__ ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Tuple: """simple docstring""" assert ( is_prime(UpperCamelCase__ ) and is_prime(UpperCamelCase__ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCamelCase = p_number_a + 1 # jump to the next number __lowerCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(UpperCamelCase__ ): number += 1 while number < p_number_a: ans.append(UpperCamelCase__ ) number += 1 # fetch the next prime number. while not is_prime(UpperCamelCase__ ): number += 1 # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ans[0] != p_number_a and ans[len(UpperCamelCase__ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Optional[Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 1), "'n' must been int and >= 1" __lowerCamelCase = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(UpperCamelCase__ ) # precondition assert ans[0] == 1 and ans[len(UpperCamelCase__ ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase_ ( UpperCamelCase__ : Dict ) -> Optional[int]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCamelCase = get_divisors(UpperCamelCase__ ) # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (divisors[0] == 1) and (divisors[len(UpperCamelCase__ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCamelCase = gcd(abs(UpperCamelCase__ ) , abs(UpperCamelCase__ ) ) # precondition assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been a int and >= 0" __lowerCamelCase = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (n >= 0), "'n' must been an int and >= 0" __lowerCamelCase = 0 __lowerCamelCase = 1 __lowerCamelCase = 1 # this will be return for _ in range(n - 1 ): __lowerCamelCase = ans ans += fiba __lowerCamelCase = tmp return ans
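The helpers above are flattened and identifier-obfuscated, so here is a readable restatement of the trial-division factorization they implement, with descriptive names (the names are mine, not the dump's). Floor division keeps the quotient an int; the flattened original divides with /=, which silently switches to float arithmetic:

from functools import reduce
from operator import mul

def prime_factorization(number: int) -> list:
    # Trial division for number >= 2; factors come out in non-decreasing order.
    factors, factor, quotient = [], 2, number
    while quotient != 1:
        if quotient % factor == 0:
            factors.append(factor)
            quotient //= factor
        else:
            factor += 1
    return factors

print(prime_factorization(75))                     # [3, 5, 5]
print(reduce(mul, prime_factorization(75)) == 75)  # True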
90
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
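For the `l0` branch above, the mask comes from the hard-concrete "stretch and clamp" trick with l = -0.1 and r = 1.1: scores far below zero produce an exact 0 (the weight is pruned), scores far above produce an exact 1. A minimal numeric sketch:

import torch

scores = torch.tensor([-4.0, -1.0, 0.0, 1.0, 4.0])
l, r = -0.1, 1.1
s = torch.sigmoid(scores)
# Stretch the sigmoid outside [0, 1], then clamp so the tails hit exactly 0 and 1.
mask = (s * (r - l) + l).clamp(min=0.0, max=1.0)
print(mask)  # tensor([0.0000, 0.2227, 0.5000, 0.7773, 1.0000])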
14
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : List[Any] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "sew-d" def __init__( self : Dict , lowercase_ : Optional[Any]=32 , lowercase_ : List[Any]=768 , lowercase_ : int=12 , lowercase_ : Dict=12 , lowercase_ : Union[str, Any]=3072 , lowercase_ : Dict=2 , lowercase_ : List[Any]=512 , lowercase_ : Union[str, Any]=256 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : List[Any]=("p2c", "c2p") , lowercase_ : Optional[int]="layer_norm" , lowercase_ : List[Any]="gelu_python" , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : Any=0.1 , lowercase_ : Dict=0.02 , lowercase_ : str=1e-7 , lowercase_ : Optional[int]=1e-5 , lowercase_ : int="group" , lowercase_ : str="gelu" , lowercase_ : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase_ : int=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase_ : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase_ : List[Any]=False , lowercase_ : int=128 , lowercase_ : List[Any]=16 , lowercase_ : Tuple=True , lowercase_ : Any=0.05 , lowercase_ : Tuple=10 , lowercase_ : List[str]=2 , lowercase_ : Any=0.0 , lowercase_ : int=10 , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[Any]="mean" , lowercase_ : List[Any]=False , lowercase_ : int=False , lowercase_ : str=256 , lowercase_ : int=0 , lowercase_ : str=1 , lowercase_ : Any=2 , **lowercase_ : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_) SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = feat_extract_norm SCREAMING_SNAKE_CASE_ : Optional[Any] = feat_extract_activation SCREAMING_SNAKE_CASE_ : Optional[int] = list(lowercase_) SCREAMING_SNAKE_CASE_ : str = list(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = list(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = conv_bias SCREAMING_SNAKE_CASE_ : int = num_conv_pos_embeddings SCREAMING_SNAKE_CASE_ : Dict = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE_ : List[Any] = len(self.conv_dim) SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Dict = intermediate_size SCREAMING_SNAKE_CASE_ : int = squeeze_factor SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE_ : Any = position_buckets SCREAMING_SNAKE_CASE_ : Tuple = share_att_key SCREAMING_SNAKE_CASE_ : Optional[int] = relative_attention SCREAMING_SNAKE_CASE_ : Tuple = norm_rel_ebd SCREAMING_SNAKE_CASE_ : Optional[int] = list(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE_ : int = num_attention_heads SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout SCREAMING_SNAKE_CASE_ : List[str] = attention_dropout SCREAMING_SNAKE_CASE_ : Optional[int] = activation_dropout SCREAMING_SNAKE_CASE_ : str = feat_proj_dropout SCREAMING_SNAKE_CASE_ : Optional[Any] = final_dropout SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps SCREAMING_SNAKE_CASE_ : Optional[int] = feature_layer_norm_eps SCREAMING_SNAKE_CASE_ : Dict = 
initializer_range SCREAMING_SNAKE_CASE_ : Any = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)' F'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE_ : Optional[Any] = apply_spec_augment SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_time_prob SCREAMING_SNAKE_CASE_ : Dict = mask_time_length SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_time_min_masks SCREAMING_SNAKE_CASE_ : List[Any] = mask_feature_prob SCREAMING_SNAKE_CASE_ : Tuple = mask_feature_length SCREAMING_SNAKE_CASE_ : Union[str, Any] = mask_feature_min_masks # ctc loss SCREAMING_SNAKE_CASE_ : int = ctc_loss_reduction SCREAMING_SNAKE_CASE_ : Any = ctc_zero_infinity # sequence classification SCREAMING_SNAKE_CASE_ : Optional[Any] = use_weighted_layer_sum SCREAMING_SNAKE_CASE_ : List[Any] = classifier_proj_size @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
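The final property of the config above multiplies the convolutional strides together; that product is the model's overall downsampling factor from raw waveform samples to encoder frames. For the default strides it works out to 320 samples, i.e. one frame per 20 ms of 16 kHz audio:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320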
91
_lowerCamelCase : Optional[int] = 65521 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = 1 A__ = 0 for plain_chr in plain_text: A__ = (a + ord(lowercase_ )) % MOD_ADLER A__ = (b + a) % MOD_ADLER return (b << 16) | a
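A sanity check for the Adler-32 implementation above against the reference in zlib; the pure-Python loop consumes str characters via ord(), while zlib.adler32 takes bytes:

import zlib

text = "Wikipedia"
MOD_ADLER = 65521
a, b = 1, 0
for ch in text:
    a = (a + ord(ch)) % MOD_ADLER
    b = (b + a) % MOD_ADLER
print((b << 16) | a)                       # 300286872 (0x11E60398)
print(zlib.adler32(text.encode("ascii")))  # 300286872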
14
0
from __future__ import annotations import numpy as np def _a ( SCREAMING_SNAKE_CASE_ : list[float] ): return np.maximum(0 , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
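A short companion to the ReLU above: the same np.maximum trick on a 2-D array, plus the subgradient (1 where the input is positive, 0 elsewhere) used during backpropagation:

import numpy as np

x = np.array([[-2.0, 0.0], [3.0, -0.5]])
print(np.maximum(0, x))         # [[0. 0.] [3. 0.]]
print((x > 0).astype(x.dtype))  # [[0. 0.] [1. 0.]]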
92
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _lowerCamelCase : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _lowerCamelCase : Tuple = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple 
docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRContextEncoderTokenizer class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRQuestionEncoderTokenizer _lowerCamelCase : int = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _lowerCamelCase : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ : '''simple docstring''' def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) elif titles is None or texts is None: A__ = titles if texts is None else texts return super().__call__( UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles] A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts] A__ = len(UpperCAmelCase__) A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages assert len(UpperCAmelCase__) == len( UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""" A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for 
encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__) ] } if return_attention_mask is not False: A__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) A__ = attention_mask return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = reader_input['''input_ids'''] A__ , A__ , A__ = reader_output[:3] A__ = len(UpperCAmelCase__) A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__) A__ = [] for doc_id in sorted_docs: A__ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A__ = sequence_ids.index(self.pad_token_id) else: A__ = len(UpperCAmelCase__) A__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = [] for start_index, start_score in enumerate(UpperCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__) A__ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A__ = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = DPRReaderTokenizer
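# A minimal end-to-end sketch of the reader tokenizer defined above, assuming the
# public facebook/dpr-reader-single-nq-base checkpoint. __call__ builds the
# (n_passages, sequence_length) matrix and decode_best_spans ranks answer spans
# from the DPRReader logits.
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded_inputs = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway",
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)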
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _lowercase : Tuple = _symbol_database.Default() _lowercase : List[Any] = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) _lowercase : Optional[Any] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _lowercase : Optional[Any] = None _lowercase : int = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _lowercase : Dict = 4_5 _lowercase : int = 1_5_8_1 _lowercase : Any = 1_5_1_7 _lowercase : Dict = 1_5_7_0 _lowercase : List[str] = 1_5_8_4 _lowercase : Any = 1_7_9_3 _lowercase : Optional[int] = 1_7_9_5 _lowercase : List[Any] = 1_9_1_6 _lowercase : List[str] = 1_8_6_4 _lowercase : Optional[Any] = 1_9_0_5 _lowercase : List[Any] = 1_9_1_9 _lowercase : int = 2_4_2_9 _lowercase : Dict = 2_2_0_8 _lowercase : Any = 2_4_1_8 _lowercase : str = 2_3_2_3 _lowercase : str = 2_4_0_7 # @@protoc_insertion_point(module_scope)
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''encoder-decoder''' UpperCAmelCase__ = True def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" A__ = kwargs.pop('''encoder''') A__ = encoder_config.pop('''model_type''') A__ = kwargs.pop('''decoder''') A__ = decoder_config.pop('''model_type''') from ..auto.configuration_auto import AutoConfig A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = True @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''') A__ = True A__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.encoder.to_dict() A__ = self.decoder.to_dict() A__ = self.__class__.model_type return output
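# Minimal usage sketch for the config class above (assumes standard public BERT
# checkpoints): build a composite config via from_encoder_decoder_configs, which
# also flips is_decoder/add_cross_attention on the decoder side as logged above.
from transformers import AutoConfig, EncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
decoder_config = AutoConfig.from_pretrained("bert-base-uncased")
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.to_dict()["model_type"])  # "encoder-decoder"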
def max_product(numbers: list[int]) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
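# Quick checks for max_product above. Swapping max/min on a negative number is
# what lets the second example recover the product of the whole array once a
# second negative sign appears.
if __name__ == "__main__":
    print(max_product([2, 3, -2, 4]))      # 6  -> subarray [2, 3]
    print(max_product([2, 3, -2, 4, -1]))  # 48 -> the whole array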
def longest_distance(graph) -> None:
    """Print the number of vertices on the longest path in a DAG, via Kahn's algorithm."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
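# For the sample graph above the longest path is 0 -> 2 -> 5 -> 6 -> 7 (five
# vertices), so the script prints 5. The update inside the loop is the DP
# recurrence long_dist[x] = max(long_dist[x], long_dist[vertex] + 1), applied in
# topological (Kahn) order so every predecessor is final before x is dequeued.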
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase : int = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
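# Hedged note on the _LazyModule pattern wired up above: importing the package
# only registers the names in _import_structure; the torch/vision code is loaded
# on first attribute access. A small sanity check (assumes transformers is installed):
import transformers

config = transformers.EfficientNetConfig()  # the real module is imported lazily here
print(config.model_type)  # "efficientnet"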
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
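# Sketch of how this builder is normally reached (the data file path is
# hypothetical): datasets routes load_dataset("json", ...) to the config and
# builder classes above; `field` selects a nested key in a single JSON object
# and `chunksize` bounds the blocks streamed through _generate_tables.
from datasets import load_dataset

ds = load_dataset("json", data_files="data/records.json", field="data")
print(ds["train"].features)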
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowercase ), """Tatoeba directory does not exist.""" ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A_ ( self ): _lowerCamelCase : Dict = tempfile.mkdtemp() return TatoebaConverter(save_dir=lowercase ) @slow def A_ ( self ): self.resolver.convert_models(['heb-eng'] ) @slow def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Dict = self.resolver.write_model_card('opus-mt-he-en' , dry_run=lowercase ) assert mmeta["long_pair"] == "heb-eng"
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowercase ( A__ ): """simple docstring""" _a = 42 _a = 42 _a = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
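# Hedged sketch of the stage-I pipeline exported above (requires accepting the
# DeepFloyd license on the Hub; the fp16 variant is assumed to be available):
import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
prompt_embeds, negative_embeds = pipe.encode_prompt("a photo of a corgi wearing a top hat")
image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]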
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84}) self.assertEqual(image_processor.do_pad , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = 
image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''image_id''': 39_769, '''annotations''': target} # encode them A__ = DeformableDetrImageProcessor() A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f: A__ = json.loads(f.read()) A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''') # encode them A__ = DeformableDetrImageProcessor(format='''coco_panoptic''') A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''') # verify pixel values A__ = torch.Size([1, 3, 800, 1_066]) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4)) # verify area A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__)) # verify boxes A__ = torch.Size([6, 4]) 
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__) A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3)) # verify image_id A__ = torch.tensor([39_769]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__)) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__)) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__)) # verify masks A__ = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__) # verify orig_size A__ = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__)) # verify size A__ = torch.tensor([800, 1_066]) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
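# Hedged usage sketch of the processor exercised by the tests above: preprocess
# one image the same way the non-batched tests do (the image path assumes the
# transformers test fixtures are present).
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeformableDetrImageProcessor()
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])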
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets lowerCAmelCase__ : str = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' lowerCAmelCase__ : List[str] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' lowerCAmelCase__ : Optional[Any] = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def __lowerCAmelCase ( self : Any ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ), 'references': datasets.Value('string' ), } ) ,homepage='https://github.com/hendrycks/math' ,codebase_urls=['https://github.com/hendrycks/math'] ,) def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Any ): UpperCAmelCase__ = 0.0 for i, j in zip(lowerCamelCase__ ,lowerCamelCase__ ): n_correct += 1.0 if math_equivalence.is_equiv(lowerCamelCase__ ,lowerCamelCase__ ) else 0.0 UpperCAmelCase__ = n_correct / len(lowerCamelCase__ ) return { "accuracy": accuracy, }
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance using numpy broadcasting."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance using pure Python."""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
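# Worked example for the two implementations above:
# d([1, 2, 3], [4, 5, 6]) = sqrt(3^2 + 3^2 + 3^2) = sqrt(27) ~= 5.196.
if __name__ == "__main__":
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9
    assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-9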
from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowercase : str = logging.get_logger(__name__) class A__ ( __UpperCAmelCase ): """simple docstring""" def __lowercase ( self , lowercase) -> List[str]: '''simple docstring''' if isinstance(lowercase , lowercase): a__ : int = [label.strip() for label in labels.split(',') if label.strip()] return labels def __call__( self , lowercase , lowercase , lowercase) -> Union[str, Any]: '''simple docstring''' if len(lowercase) == 0 or len(lowercase) == 0: raise ValueError('You must include at least one label and at least one sequence.') if hypothesis_template.format(labels[0]) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' 'Make sure the passed template includes formatting syntax such as {{}} where the label should go.' ).format(lowercase)) if isinstance(lowercase , lowercase): a__ : List[Any] = [sequences] a__ : List[Any] = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase)] for label in labels]) return sequence_pairs, sequences @add_end_docstrings(__UpperCAmelCase ) class A__ ( __UpperCAmelCase ): """simple docstring""" def __init__( self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase) -> Dict: '''simple docstring''' a__ : List[Any] = args_parser super().__init__(*lowercase , **lowercase) if self.entailment_id == -1: logger.warning( 'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to ' '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.') @property def __lowercase ( self) -> Any: '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith('entail'): return ind return -1 def __lowercase ( self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase) -> Optional[int]: '''simple docstring''' a__ : Optional[int] = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( 'Tokenizer was not supporting padding necessary for zero-shot, attempting to use ' ' `pad_token=eos_token`') a__ : List[str] = self.tokenizer.eos_token try: a__ : Optional[int] = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , ) except Exception as e: if "too short" in str(lowercase): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. a__ : Any = self.tokenizer( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def __lowercase ( self , **lowercase) -> str: '''simple docstring''' if kwargs.get('multi_class' , lowercase) is not None: a__ : str = kwargs['multi_class'] logger.warning( 'The `multi_class` argument has been deprecated and renamed to `multi_label`. 
' '`multi_class` will be removed in a future version of Transformers.') a__ : List[str] = {} if "candidate_labels" in kwargs: a__ : Union[str, Any] = self._args_parser._parse_labels(kwargs['candidate_labels']) if "hypothesis_template" in kwargs: a__ : Dict = kwargs['hypothesis_template'] a__ : Any = {} if "multi_label" in kwargs: a__ : int = kwargs['multi_label'] return preprocess_params, {}, postprocess_params def __call__( self , lowercase , *lowercase , **lowercase , ) -> Dict: '''simple docstring''' if len(lowercase) == 0: pass elif len(lowercase) == 1 and "candidate_labels" not in kwargs: a__ : int = args[0] else: raise ValueError(F'Unable to understand extra arguments {args}') return super().__call__(lowercase , **lowercase) def __lowercase ( self , lowercase , lowercase=None , lowercase="This example is {}.") -> Optional[int]: '''simple docstring''' a__ , a__ : Any = self._args_parser(lowercase , lowercase , lowercase) for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase)): a__ : Dict = self._parse_and_tokenize([sequence_pair]) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowercase) - 1, **model_input, } def __lowercase ( self , lowercase) -> List[str]: '''simple docstring''' a__ : Any = inputs['candidate_label'] a__ : int = inputs['sequence'] a__ : Tuple = {k: inputs[k] for k in self.tokenizer.model_input_names} a__ : Optional[int] = self.model(**lowercase) a__ : Union[str, Any] = { 'candidate_label': candidate_label, 'sequence': sequence, 'is_last': inputs['is_last'], **outputs, } return model_outputs def __lowercase ( self , lowercase , lowercase=False) -> Dict: '''simple docstring''' a__ : Dict = [outputs['candidate_label'] for outputs in model_outputs] a__ : Optional[int] = [outputs['sequence'] for outputs in model_outputs] a__ : Any = np.concatenate([output['logits'].numpy() for output in model_outputs]) a__ : Optional[Any] = logits.shape[0] a__ : Dict = len(lowercase) a__ : List[Any] = N // n a__ : List[Any] = logits.reshape((num_sequences, n, -1)) if multi_label or len(lowercase) == 1: # softmax over the entailment vs. contradiction dim for each label independently a__ : str = self.entailment_id a__ : Any = -1 if entailment_id == 0 else 0 a__ : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]] a__ : Optional[int] = np.exp(lowercase) / np.exp(lowercase).sum(-1 , keepdims=lowercase) a__ : Dict = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels a__ : List[str] = reshaped_outputs[..., self.entailment_id] a__ : str = np.exp(lowercase) / np.exp(lowercase).sum(-1 , keepdims=lowercase) a__ : Union[str, Any] = list(reversed(scores[0].argsort())) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
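# Hedged usage sketch of the pipeline implemented above, assuming the standard
# facebook/bart-large-mnli checkpoint (any NLI model whose label2id contains an
# "entailment" entry works, per the entailment_id property above):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])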
from ...processing_utils import ProcessorMixin class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''SpeechT5FeatureExtractor''' UpperCAmelCase__ = '''SpeechT5Tokenizer''' def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__) def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]: '''simple docstring''' A__ = kwargs.pop('''audio''' , UpperCAmelCase__) A__ = kwargs.pop('''text''' , UpperCAmelCase__) A__ = kwargs.pop('''text_target''' , UpperCAmelCase__) A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__) A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) elif text is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if audio_target is not None: A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_values'''] elif text_target is not None: A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' A__ = kwargs.pop('''input_values''' , UpperCAmelCase__) A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__) A__ = kwargs.pop('''labels''' , UpperCAmelCase__) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) elif input_ids is not None: A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) else: A__ = None if labels is not None: if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]): A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__) A__ = targets['''input_ids'''] else: A__ = self.feature_extractor.feature_size A__ = self.feature_extractor.num_mel_bins A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__) A__ = feature_size_hack A__ = targets['''input_values'''] else: A__ = None if inputs is None: return targets if targets is not None: A__ = labels A__ = targets.get('''attention_mask''') if 
decoder_attention_mask is not None: A__ = decoder_attention_mask return inputs def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
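# Hedged sketch of the processor above in the text-to-speech direction (public
# microsoft/speecht5_tts checkpoint assumed); text is routed through the
# tokenizer branch of __call__:
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
print(inputs["input_ids"].shape)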
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = analyze_text(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. __SCREAMING_SNAKE_CASE = sum(single_char_strings.values() ) # one length string __SCREAMING_SNAKE_CASE = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __SCREAMING_SNAKE_CASE = single_char_strings[ch] __SCREAMING_SNAKE_CASE = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase_ ) # entropy formula. # print entropy print(f"{round(-1 * my_fir_sum ):.1f}" ) # two len string __SCREAMING_SNAKE_CASE = sum(two_char_strings.values() ) __SCREAMING_SNAKE_CASE = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: __SCREAMING_SNAKE_CASE = cha + cha if sequence in two_char_strings: __SCREAMING_SNAKE_CASE = two_char_strings[sequence] __SCREAMING_SNAKE_CASE = int(UpperCamelCase_ ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase_ ) # print second entropy print(f"{round(-1 * my_sec_sum ):.1f}" ) # print the difference between them print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" ) def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = Counter() # type: ignore __SCREAMING_SNAKE_CASE = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase_ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _lowerCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : str = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git_vision_model''' def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_channels A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''') == "git": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''git''' def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__) if vision_config is None: A__ = {} logger.info('''vision_config is None. 
initializing the GitVisionConfig with default values.''') A__ = GitVisionConfig(**UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = tie_word_embeddings A__ = num_image_with_embedding A__ = bos_token_id A__ = eos_token_id def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
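# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original file):
# composing the two configuration classes above. The concrete hyperparameter
# values are made up for the example.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=224, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
print(config.vision_config.image_size)  # 224
print(config.to_dict()["model_type"])   # "git"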
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ): '''simple docstring''' try: lowercase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase = default else: # KEY is set, convert it to True or False. try: lowercase = strtobool(lowerCAmelCase__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value lowercase__ :Tuple = parse_flag_from_env("RUN_SLOW", default=False) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skip('''Test was skipped''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple 
docstring''' return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__=None , lowerCAmelCase__=None ): '''simple docstring''' if test_case is None: return partial(lowerCAmelCase__ , version=lowerCAmelCase__ ) return unittest.skipUnless(is_torch_version('''>=''' , lowerCAmelCase__ ) , f'test requires torch version >= {version}' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCAmelCase__ ) lowercase__ :Union[str, Any] = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCAmelCase__ ) class lowercase ( unittest.TestCase ): lowercase_ : Dict =True @classmethod def A__ ( cls): lowercase = tempfile.mkdtemp() @classmethod def A__ ( cls): if os.path.exists(cls.tmpdir): shutil.rmtree(cls.tmpdir) def A__ ( self): if self.clear_on_setup: for path in Path(self.tmpdir).glob('''**/*'''): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A__) class lowercase ( unittest.TestCase ): def A__ ( self): super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class lowercase ( unittest.TestCase ): def A__ ( self ,A__): lowercase = mocks if isinstance(A__ ,(tuple, list)) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = AcceleratorState() lowercase = tensor[None].clone().to(state.device ) lowercase = gather(lowerCAmelCase__ ).cpu() lowercase = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , lowerCAmelCase__ ): return False return True class lowercase : def __init__( self ,A__ ,A__ ,A__): lowercase = returncode lowercase = stdout lowercase = stderr async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' while True: lowercase = await stream.readline() if line: callback(lowerCAmelCase__ ) else: break async def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False ): '''simple docstring''' if echo: print('''\nRunning: ''' , ''' '''.join(lowerCAmelCase__ ) ) lowercase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=lowerCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase = [] lowercase = [] def tee(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="" ): lowercase = line.decode('''utf-8''' ).rstrip() sink.append(lowerCAmelCase__ ) if not quiet: print(lowerCAmelCase__ , lowerCAmelCase__ , file=lowerCAmelCase__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase__ : tee(lowerCAmelCase__ , lowerCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=lowerCAmelCase__ , ) return _RunOutput(await p.wait() , lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=180 , lowerCAmelCase__=False , lowerCAmelCase__=True ): '''simple docstring''' lowercase = asyncio.get_event_loop() lowercase = loop.run_until_complete( _stream_subprocess(lowerCAmelCase__ , env=lowerCAmelCase__ , stdin=lowerCAmelCase__ , timeout=lowerCAmelCase__ , quiet=lowerCAmelCase__ , echo=lowerCAmelCase__ ) ) lowercase = ''' '''.join(lowerCAmelCase__ ) if result.returncode > 0: lowercase = '''\n'''.join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) return result class lowercase ( SCREAMING_SNAKE_CASE__ ): pass def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False ): '''simple docstring''' try: lowercase = subprocess.check_output(lowerCAmelCase__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowerCAmelCase__ , '''decode''' ): lowercase = 
output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'Command `{" ".join(lowerCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
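# ---------------------------------------------------------------------------
# Illustrative usage of the skip decorators defined above (added for clarity;
# not part of the original file): they gate individual tests on hardware or
# optional dependencies being available at runtime. The re-export path from
# accelerate.test_utils is an assumption about the installed version.
import unittest

from accelerate.test_utils import require_cuda, require_multi_gpu


class ExampleTest(unittest.TestCase):
    @require_cuda
    def test_needs_a_gpu(self):
        self.assertTrue(True)

    @require_multi_gpu
    def test_needs_several_gpus(self):
        self.assertTrue(True)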
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """
    Return the citation count for the paper described by ``params``.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
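# ---------------------------------------------------------------------------
# Usage sketch (added for clarity; not part of the original file): the same
# helper with another lookup. Google Scholar may rate-limit or block
# unauthenticated scraping, so treat this as best-effort rather than a stable
# API; the call is left commented out to avoid network traffic on import.
other_params = {"title": "Attention is all you need", "year": 2017, "hl": "en"}
# print(get_citation("https://scholar.google.com/scholar_lookup", params=other_params))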
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(lowercase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A__ = pipeline.text_encoder else: A__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(lowercase_ ) > -1: try: A__ = curr_layer.__getattr__(lowercase_ ) if len(lowercase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(lowercase_ ) == 0: break except Exception: if len(lowercase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowercase_ ) else: pair_keys.append(lowercase_ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowercase_ , lowercase_ ) # update visited list for item in pair_keys: visited.append(lowercase_ ) return pipeline if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") _lowerCamelCase : Tuple = parser.parse_args() _lowerCamelCase : List[Any] = args.base_model_path _lowerCamelCase : Optional[int] = args.checkpoint_path _lowerCamelCase : Dict = args.dump_path _lowerCamelCase : Optional[Any] = args.lora_prefix_unet _lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder _lowerCamelCase : List[Any] = args.alpha _lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _lowerCamelCase : Tuple = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
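# ---------------------------------------------------------------------------
# Minimal sketch of the merge rule the script applies, W = W0 + alpha * (up @ down),
# shown on a toy weight matrix (added for clarity; not part of the original
# file). The rank, shapes, and alpha below are made up for illustration.
import torch

torch.manual_seed(0)
w0 = torch.randn(8, 8)         # frozen base weight
lora_down = torch.randn(4, 8)  # rank-4 down-projection
lora_up = torch.randn(8, 4)    # rank-4 up-projection
alpha = 0.75

merged = w0 + alpha * (lora_up @ lora_down)
print(merged.shape)  # torch.Size([8, 8])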
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
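# ---------------------------------------------------------------------------
# Illustrative convergence check (added for clarity; not part of the original
# file): Monte Carlo error shrinks roughly like 1/sqrt(N), so 100x more
# samples buys about one more correct digit on average.
if __name__ == "__main__":
    for n in (10**2, 10**4, 10**6):
        estimate = area_under_curve_estimator(n, lambda x: sqrt(4.0 - x * x), 0.0, 2.0)
        print(f"N={n}: |estimate - pi| = {abs(estimate - pi)}")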
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
"""
Password generation and strength checking helpers.
"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """
    Generate a random password of the given length using cryptographically
    secure choices over letters, digits, and punctuation.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """
    Generate a password of length ``i`` that is guaranteed to contain the
    characters in ``chars_incl``, padded with random letters, digits, and
    punctuation, then shuffled.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """
    Passwords should contain UPPERCASE, lowercase, numbers, and special
    characters, and be at least ``min_length`` characters long.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
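# ---------------------------------------------------------------------------
# Quick illustration of the strength check above (added for clarity; not part
# of the original file).
assert is_strong_password("Aa1!aaaa") is True   # upper, lower, digit, special
assert is_strong_password("aaaaaaaa") is False  # lowercase only
assert is_strong_password("Aa1!") is False      # shorter than min_length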
def nor_gate(input_1: int, input_2: int) -> int:
    """
    NOR gate: outputs 1 only when both inputs are 0, otherwise 0.
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
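# ---------------------------------------------------------------------------
# Side note (added for clarity; not part of the original file): NOR is
# functionally complete, so other gates can be built from it alone,
# e.g. NOT(a) = NOR(a, a).
def not_gate(input_1: int) -> int:
    return nor_gate(input_1, input_1)


print(not_gate(0), not_gate(1))  # 1 0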
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a : List[str] = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ) ->int: '''simple docstring''' return (preds == labels).mean() @dataclass class __UpperCamelCase : lowerCamelCase : str =field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) lowerCamelCase : Optional[str] =field( default=a__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowerCamelCase : Optional[str] =field( default=a__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) lowerCamelCase : Optional[str] =field( default=a__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __UpperCamelCase : lowerCamelCase : str =field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) lowerCamelCase : str =field(metadata={"""help""": """Should contain the data files for the task."""} ) lowerCamelCase : int =field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCamelCase : bool =field( default=a__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) ->str: '''simple docstring''' a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) a, a, a : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , _lowercase ) # Set seed set_seed(training_args.seed ) try: a : Union[str, Any] = processors[data_args.task_name]() a : Any = processor.get_labels() a : Optional[Any] = len(_lowercase ) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) a : Optional[int] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , ) # Get datasets a : str = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a : Any = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_lowercase : EvalPrediction ) -> Dict: a : int = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_lowercase , p.label_ids )} # Data collator a : List[Any] = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a : Dict = Trainer( model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a : Dict = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a : str = trainer.evaluate() a : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_master(): with open(_lowercase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, 
value in result.items(): logger.info(" %s = %s" , _lowercase , _lowercase ) writer.write("%s = %s\n" % (key, value) ) results.update(_lowercase ) return results def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Any: '''simple docstring''' main() if __name__ == "__main__": main()
import os import sys import unittest _lowerCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowerCamelCase : Any = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _lowerCamelCase : str = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = get_test_to_tester_mapping(UpperCAmelCase__) A__ = {'''BertModelTest''': '''BertModelTester'''} A__ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = get_model_to_test_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = get_model_to_tester_mapping(UpperCAmelCase__) A__ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A__ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': 
['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(get_test_info.to_json(UpperCAmelCase__) , UpperCAmelCase__)
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __UpperCamelCase : int = logging.getLogger(__name__) def __SCREAMING_SNAKE_CASE ( A_ , A_ ): # save results if os.path.exists(A_ ): if os.path.exists(os.path.join(A_ , '''config.json''' ) ) and os.path.isfile( os.path.join(A_ , '''config.json''' ) ): os.remove(os.path.join(A_ , '''config.json''' ) ) if os.path.exists(os.path.join(A_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(A_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(A_ , '''pytorch_model.bin''' ) ) else: os.makedirs(A_ ) model.save_pretrained(A_ ) def __SCREAMING_SNAKE_CASE ( A_ , A_=False ): lowerCAmelCase__ : Optional[Any] = 2 if unlogit: lowerCAmelCase__ : Union[str, Any] = torch.pow(A_ , A_ ) lowerCAmelCase__ : Optional[Any] = p * torch.log(A_ ) lowerCAmelCase__ : List[Any] = 0 return -plogp.sum(dim=-1 ) def __SCREAMING_SNAKE_CASE ( A_ ): logger.info('''lv, h >\t''' + '''\t'''.join(f'{x + 1}' for x in range(len(A_ ) ) ) ) for row in range(len(A_ ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:d}' for x in tensor[row].cpu().data ) ) def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_=True , A_=True , A_=None , A_=False ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ : Dict = torch.zeros(A_ , A_ ).to(args.device ) lowerCAmelCase__ : int = torch.zeros(A_ , A_ ).to(args.device ) if head_mask is None: lowerCAmelCase__ : Union[str, Any] = torch.ones(A_ , A_ ).to(args.device ) head_mask.requires_grad_(requires_grad=A_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ : Union[str, Any] = None lowerCAmelCase__ : Optional[int] = 0.0 lowerCAmelCase__ : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(A_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ : Any = tuple(t.to(args.device ) for t in inputs ) ((lowerCAmelCase__) ,) : List[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ : Any = model(A_ , labels=A_ , head_mask=A_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(A_ ): lowerCAmelCase__ : Dict = entropy(attn.detach() , A_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(A_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ : Any = 2 lowerCAmelCase__ : Dict = torch.pow(torch.pow(A_ , A_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not 
args.dont_normalize_global_importance: lowerCAmelCase__ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(A_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(A_ ) logger.info('''Head ranked by importance scores''' ) lowerCAmelCase__ : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ : Optional[int] = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ : int = head_ranks.view_as(A_ ) print_ad_tensor(A_ ) return attn_entropy, head_importance, total_loss def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = compute_heads_importance(A_ , A_ , A_ , compute_entropy=A_ ) lowerCAmelCase__ : Union[str, Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , A_ , original_score * args.masking_threshold ) lowerCAmelCase__ : Union[str, Any] = torch.ones_like(A_ ) lowerCAmelCase__ : List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ : int = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ : Union[str, Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ : str = float('''Inf''' ) lowerCAmelCase__ : List[Any] = head_importance.view(-1 ).sort()[1] if len(A_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads lowerCAmelCase__ : List[Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ : int = new_head_mask.view(-1 ) lowerCAmelCase__ : Optional[int] = 0.0 lowerCAmelCase__ : Union[str, Any] = new_head_mask.view_as(A_ ) lowerCAmelCase__ : Tuple = new_head_mask.clone().detach() print_ad_tensor(A_ ) # Compute metric and head importance again lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = compute_heads_importance( A_ , A_ , A_ , compute_entropy=A_ , head_mask=A_ ) lowerCAmelCase__ : Tuple = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , A_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , ) logger.info('''Final head mask''' ) print_ad_tensor(A_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ : Optional[Any] = datetime.now() lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = compute_heads_importance( A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ ) lowerCAmelCase__ : Optional[Any] = 1 / loss lowerCAmelCase__ : Tuple = datetime.now() - before_time lowerCAmelCase__ : int = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ : List[Any] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(A_ ) ) } for k, v in heads_to_prune.items(): if isinstance(A_ , A_ ): lowerCAmelCase__ : int = [ v, ] assert sum(len(A_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(A_ ) lowerCAmelCase__ : List[Any] = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ : Any = datetime.now() lowerCAmelCase__ 
,lowerCAmelCase__ ,lowerCAmelCase__ : int = compute_heads_importance( A_ , A_ , A_ , compute_entropy=A_ , compute_importance=A_ , head_mask=A_ , actually_pruned=A_ , ) lowerCAmelCase__ : int = 1 / loss lowerCAmelCase__ : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , A_ , A_ , pruned_num_params / original_num_params * 1_00 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , A_ , A_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_00 ) save_model(A_ , args.output_dir ) def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=A_ , type=A_ , required=A_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=A_ , type=A_ , required=A_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=A_ , type=A_ , required=A_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=A_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=A_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=A_ , type=A_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=A_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=A_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=A_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=A_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=1_28 , type=A_ , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=A_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=A_ , default=42 ) parser.add_argument('''--local_rank''' , type=A_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=A_ , default='''''' , help='''Can be used for distant debugging.''' ) lowerCAmelCase__ : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) lowerCAmelCase__ : str = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ : Dict = torch.device('''cuda''' , args.local_rank ) lowerCAmelCase__ : Union[str, Any] = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ : Dict = nn.parallel.DistributedDataParallel( A_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=A_ ) elif args.n_gpu > 1: lowerCAmelCase__ : List[Any] = nn.DataParallel(A_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=A_ ) torch.save(A_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , A_ ) # Prepare dataset lowerCAmelCase__ : str = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ : Union[str, Any] = (torch.from_numpy(A_ ),) lowerCAmelCase__ : Tuple = TensorDataset(*A_ ) lowerCAmelCase__ : Optional[int] = RandomSampler(A_ ) lowerCAmelCase__ : Dict = DataLoader(A_ , sampler=A_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(A_ , A_ , A_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ : Tuple = mask_heads(A_ , A_ , A_ ) prune_heads(A_ , A_ , A_ , A_ ) if __name__ == "__main__": main()
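# ---------------------------------------------------------------------------
# Minimal sketch of the pruning primitive the script builds on (added for
# clarity; not part of the original file): transformers models that implement
# head pruning expose `prune_heads({layer_index: [head_indices]})`, which
# physically removes the selected attention heads. The head choices below are
# arbitrary, and the call downloads the "gpt2" checkpoint on first run.
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")
params_before = sum(p.numel() for p in model.parameters())
model.prune_heads({0: [0, 2], 5: [7]})
params_after = sum(p.numel() for p in model.parameters())
print(params_before - params_after)  # positive: three heads' worth of weights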
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int = 13 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 37 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 128 , UpperCAmelCase__ : List[int] = [2, 2, 2, 2] , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = encoder_stride A__ = num_attention_outputs A__ = embed_dim A__ = embed_dim + 1 A__ = resolution A__ = depths A__ = hidden_sizes A__ = dim A__ = mlp_expansion_ratio def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' A__ = 
TFEfficientFormerModel(config=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str) ->Union[str, Any]: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A__ = 1 A__ = TFEfficientFormerForImageClassification(UpperCAmelCase__) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = TFEfficientFormerModelTester(self) A__ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''EfficientFormer does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass @unittest.skip(reason='''EfficientFormer does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict): A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , 
UpperCAmelCase__) if hasattr(self.model_tester , '''encoder_seq_length'''): A__ = self.model_tester.encoder_seq_length if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1: A__ = seq_length * self.model_tester.chunk_length else: A__ = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: A__ = outputs.decoder_hidden_states self.asseretIsInstance(UpperCAmelCase__ , (list, tuple)) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase__) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=False) ->int: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) @unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFEfficientFormerModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''chunk_length''' , UpperCAmelCase__) if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''): A__ = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , 
self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) , training=UpperCAmelCase__) A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model A__ = model_class(UpperCAmelCase__) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes A__ = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCAmelCase__) for key, val in model.input_signature.items() if key in model.dummy_inputs } A__ = model(UpperCAmelCase__) self.assertTrue(outputs_dict is not None) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.0555, 0.4825, -0.0852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( '''snap-research/efficientformer-l1-300''') A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''tf''') # forward pass A__ = model(**UpperCAmelCase__ , training=UpperCAmelCase__) # verify the logits A__ = tf.TensorShape((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = tf.constant([-0.1312, 0.4353, -1.0499]) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
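The last test in the class above exercises the model through maximally general Keras inputs, where every flexible dimension is None. A toy sketch of that pattern, in case it is unfamiliar; the stand-in Conv2D model is an assumption for illustration, not the EfficientFormer architecture:

import tensorflow as tf

# Symbolic inputs with unknown (None) spatial dimensions catch shape
# conditionals that only hold for concrete sizes, which is what the test
# above is after.
inputs = tf.keras.Input(shape=(None, None, 3), dtype=tf.float32, name="pixel_values")
outputs = tf.keras.layers.Conv2D(filters=8, kernel_size=3, padding="same")(inputs)
model = tf.keras.Model(inputs, outputs)
model(tf.zeros((1, 32, 32, 3)))  # a concrete call still works afterwards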
14
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCAmelCase : List[str] = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
107
from __future__ import annotations def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[float, list[float]]: """simple docstring""" A__ = list(range(len(lowercase_ ) ) ) A__ = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda i : ratio[i] , reverse=True ) A__ = 0 A__ = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: A__ = 1 max_value += value[i] capacity -= weight[i] else: A__ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
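The greedy routine above is hard to read through the mangled names; a minimal runnable sketch of the same fractional-knapsack algorithm, with the function name and the sample instance assumed for illustration:

from __future__ import annotations


def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    ratio = [v / w for v, w in zip(value, weight)]
    index = sorted(range(len(value)), key=lambda i: ratio[i], reverse=True)
    max_value = 0.0
    fractions = [0.0] * len(value)
    for i in index:  # visit items in order of decreasing value per weight
        if weight[i] <= capacity:
            fractions[i] = 1.0  # take the whole item
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]  # take what still fits
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


# Classic instance: ratios are 6, 5, 4, so items 0 and 1 go in whole and
# two thirds of item 2 fill the rest: 60 + 100 + 80 = 240.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1.0, 1.0, 2 / 3])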
14
0
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' lowerCAmelCase : Optional[Any] = hf_hub_url(repo_id=SCREAMING_SNAKE_CASE , path=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE ) assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(SCREAMING_SNAKE_CASE )}"""
108
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = args.log_outputs A__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric A__ = load_metric('''wer''' ) A__ = load_metric('''cer''' ) # compute metrics A__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) A__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results A__ = f"""WER: {wer_result}\nCER: {cer_result}""" print(lowercase_ ) with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f: f.write(lowercase_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: A__ = f"""log_{dataset_id}_predictions.txt""" A__ = f"""log_{dataset_id}_targets.txt""" with open(lowercase_ , '''w''' ) as p, open(lowercase_ , '''w''' ) as t: # mapping function to write output def write_to_file(lowercase_ , lowercase_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(lowercase_ , with_indices=lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training A__ = re.sub(lowercase_ , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! A__ = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: A__ = ''' '''.join(text.split(lowercase_ ) ) return text def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor A__ = AutoFeatureExtractor.from_pretrained(args.model_id ) A__ = feature_extractor.sampling_rate # resample audio A__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowercase_ ) ) # load eval pipeline if args.device is None: A__ = 0 if torch.cuda.is_available() else -1 A__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(lowercase_ ): A__ = asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) A__ = prediction['''text'''] A__ = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples A__ = dataset.map(lowercase_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(lowercase_ , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCamelCase : str = parser.parse_args() main(args)
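The normalization step in the script above (strip the ignored punctuation, lower-case, collapse whitespace) is the piece most worth understanding in isolation; a self-contained sketch with a made-up sample sentence:

import re

# Mirrors the chars-to-ignore regex used in the script above.
CHARS_TO_IGNORE_REGEX = r'[,?.!\-\;\:"“%‘”�—’…–]'


def normalize_text(text: str) -> str:
    text = re.sub(CHARS_TO_IGNORE_REGEX, "", text.lower())
    # Collapse newlines and runs of spaces into single spaces; order matters.
    for token in ["\n\n", "\n", "  ", "  "]:
        text = " ".join(text.split(token))
    return text


print(normalize_text("Hello, World!\nHow are you?"))  # -> hello world how are you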
14
0
"""simple docstring""" from __future__ import annotations A: Dict = tuple[int, int, int] A: Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase A: List[str] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- A: str = "EGZWVONAHDCLFQMSIPJBYUKXTR" A: List[str] = "FOBHMDKEXQNRAULPGSJVTYICZW" A: Any = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- A: str = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- A: Dict = "RMDJXFUWGISLHVTCQNKYPBEZOA" A: Optional[int] = "SGLCPQWZHKXAREONTFBVIYJUDM" A: Optional[int] = "HVSICLTYKQUBXDWAJZOMFGPREN" A: List[str] = "RZWQHFMVDBKICJLNTUXAGYPSOE" A: Optional[int] = "LFKIJODBEGAMQPXVUHYSTCZRWN" A: Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def _snake_case ( UpperCamelCase : RotorPositionT , UpperCamelCase : RotorSelectionT , UpperCamelCase : str ): # Checks if there are 3 unique rotors if (unique_rotsel := len(set(UpperCamelCase ) )) < 3: UpperCAmelCase : Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})" raise Exception(UpperCamelCase ) # Checks if rotor positions are valid UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = rotpos if not 0 < rotorposa <= len(UpperCamelCase ): UpperCAmelCase : Optional[Any] = F"First rotor position is not within range of 1..26 ({rotorposa}" raise ValueError(UpperCamelCase ) if not 0 < rotorposa <= len(UpperCamelCase ): UpperCAmelCase : List[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(UpperCamelCase ) if not 0 < rotorposa <= len(UpperCamelCase ): UpperCAmelCase : List[Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})" raise ValueError(UpperCamelCase ) # Validates string and returns dict UpperCAmelCase : Optional[Any] = _plugboard(UpperCamelCase ) return rotpos, rotsel, pbdict def _snake_case ( UpperCamelCase : str ): # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(UpperCamelCase , UpperCamelCase ): UpperCAmelCase : List[str] = F"Plugboard setting isn't type string ({type(UpperCamelCase )})" raise TypeError(UpperCamelCase ) elif len(UpperCamelCase ) % 2 != 0: UpperCAmelCase : Union[str, Any] = F"Odd number of symbols ({len(UpperCamelCase )})" raise Exception(UpperCamelCase ) elif pbstring == "": return {} pbstring.replace(""" """ , """""" ) # Checks if all characters are unique UpperCAmelCase : str = set() for i in pbstring: if i not in abc: UpperCAmelCase : str = F"'{i}' not in list of symbols" raise Exception(UpperCamelCase ) elif i in tmppbl: UpperCAmelCase : Dict = F"Duplicate symbol ({i})" raise Exception(UpperCamelCase ) else: tmppbl.add(UpperCamelCase ) del tmppbl # Created the dictionary UpperCAmelCase : Dict = {} for j in range(0 , len(UpperCamelCase ) - 1 , 2 ): UpperCAmelCase : Optional[int] = pbstring[j + 1] UpperCAmelCase : List[str] = pbstring[j] return pb def _snake_case ( UpperCamelCase : str , UpperCamelCase : RotorPositionT , UpperCamelCase : RotorSelectionT = (rotora, rotora, rotora) , UpperCamelCase : str = "" , ): UpperCAmelCase : Optional[int] = text.upper() UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase : List[Any] = _validator( UpperCamelCase , UpperCamelCase , plugb.upper() ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = rotor_position UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 UpperCAmelCase : int = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: UpperCAmelCase : Optional[int] = plugboard[symbol] # rotor ra -------------------------- UpperCAmelCase : List[str] = abc.index(UpperCamelCase ) + rotorposa UpperCAmelCase : Tuple = rotora[index % len(UpperCamelCase )] # rotor rb -------------------------- UpperCAmelCase : List[Any] = abc.index(UpperCamelCase ) + rotorposa UpperCAmelCase : Optional[int] = rotora[index % len(UpperCamelCase )] # rotor rc -------------------------- UpperCAmelCase : Dict = abc.index(UpperCamelCase ) + rotorposa UpperCAmelCase : Union[str, Any] = rotora[index % len(UpperCamelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher UpperCAmelCase : Union[str, Any] = reflector[symbol] # 2nd rotors UpperCAmelCase : str = abc[rotora.index(UpperCamelCase ) - rotorposa] UpperCAmelCase : int = abc[rotora.index(UpperCamelCase ) - rotorposa] UpperCAmelCase : Optional[Any] = abc[rotora.index(UpperCamelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: UpperCAmelCase : Any = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(UpperCamelCase ): UpperCAmelCase : Dict = 0 rotorposa += 1 if rotorposa >= len(UpperCamelCase ): UpperCAmelCase : Tuple = 0 rotorposa += 1 if rotorposa >= len(UpperCamelCase ): UpperCAmelCase : List[str] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(UpperCamelCase ) return "".join(UpperCamelCase ) if __name__ == "__main__": A: Dict = "This is my Python script that emulates the Enigma machine from WWII." A: Union[str, Any] = (1, 1, 1) A: int = "pictures" A: List[Any] = (rotora, rotora, rotora) A: Any = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
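The reflector comment above ("the reason you don't need another machine to decipher") is the heart of the design: because the reflector is a symmetric substitution, the whole machine is an involution, so running the ciphertext back through with the same settings recovers the plaintext, as the demo at the bottom of the file shows. A two-line check of that symmetry, using the same A<->N, B<->O, ... pairing as the reflector dict above:

# Applying a symmetric substitution twice returns the input.
reflector = {chr(ord("A") + i): chr(ord("A") + (i + 13) % 26) for i in range(26)}
assert all(reflector[reflector[c]] == c for c in reflector)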
109
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : int = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys _lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
import os import sys import unittest lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCAmelCase = os.path.join(git_repo_path, 'src', 'diffusers') class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = find_backend(''' if not is_torch_available():''' ) self.assertEqual(UpperCamelCase_ , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") lowercase__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(UpperCamelCase_ , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") lowercase__ = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(UpperCamelCase_ , '''torch_and_transformers_and_onnx''' ) def lowerCamelCase_ ( self: Any ) -> Optional[Any]: """simple docstring""" lowercase__ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , UpperCamelCase_ ) self.assertIn('''torch_and_transformers''' , UpperCamelCase_ ) self.assertIn('''flax_and_transformers''' , UpperCamelCase_ ) self.assertIn('''torch_and_transformers_and_onnx''' , UpperCamelCase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(UpperCamelCase_ , '''\nCONSTANT = None\n''' ) lowercase__ = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( UpperCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) lowercase__ = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' lowercase__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' lowercase__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , UpperCamelCase_ )
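The tests above pin down what find_backend has to do: pull every is_xxx_available() guard out of a line and join the backend names with _and_. A hypothetical re-implementation sketch, for orientation only (the real helper lives in check_dummies and may differ in detail):

import re


def find_backend(line):
    # Collect every backend mentioned in an `is_<backend>_available()` call.
    backends = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(backends) if backends else None


assert find_backend(" if not is_torch_available():") == "torch"
assert find_backend(" if not (is_torch_available() and is_transformers_available()):") == "torch_and_transformers"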
110
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase): @slow def UpperCAmelCase__ ( self : int ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ : int = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' ) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' ) UpperCamelCase__ : int = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids UpperCamelCase__ : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids UpperCamelCase__ : str = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ).loss UpperCamelCase__ : str = -tf.math.reduce_mean(UpperCAmelCase__ ).numpy() UpperCamelCase__ : Dict = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
146
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]": """simple docstring""" if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) A__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 A__ = 1 if upper_limit > 0: A__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _lowerCamelCase : List[Any] = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
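The recurrence in the comment above, C(i) = sum of C(j) * C(i - j - 1) for j = 0..i-1, is quick to sanity-check. A cleaned-up sketch of the same dynamic program (the readable function name is assumed):

def catalan_numbers(upper_limit: int) -> list[int]:
    catalan = [0] * (upper_limit + 1)
    catalan[0] = 1  # base cases: C(0) = C(1) = 1
    if upper_limit > 0:
        catalan[1] = 1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            # C(i) = sum over j of C(j) * C(i - j - 1)
            catalan[i] += catalan[j] * catalan[i - j - 1]
    return catalan


# The first few Catalan numbers, for a quick check.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]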
14
0
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str]=10 ): """simple docstring""" __UpperCAmelCase : List[Any] = [] for _ in range(lowercase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any=10 ): """simple docstring""" __UpperCAmelCase : List[str] = [] for step in range(lowercase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : Any = os.path.join(lowercase_ , """schedule.bin""" ) torch.save(scheduler.state_dict() , lowercase_ ) __UpperCAmelCase : List[Any] = torch.load(lowercase_ ) scheduler.load_state_dict(lowercase_ ) return lrs @require_torch class _A ( unittest.TestCase ): def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__ ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__ ) __UpperCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) __UpperCAmelCase : int = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __UpperCAmelCase : Any = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): __UpperCAmelCase : List[Any] = criterion(UpperCAmelCase__ , UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase__ ) __UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) __UpperCAmelCase : int = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __UpperCAmelCase : List[str] = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase__ , weight_decay=0.0 , relative_step=UpperCAmelCase__ , scale_parameter=UpperCAmelCase__ , warmup_init=UpperCAmelCase__ , ) for _ in range(1_000 ): __UpperCAmelCase : int = criterion(UpperCAmelCase__ , UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None _SCREAMING_SNAKE_CASE : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None _SCREAMING_SNAKE_CASE : List[Any] = 10 def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Any: '''simple docstring''' self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ , UpperCAmelCase__ , delta=UpperCAmelCase__ , msg=UpperCAmelCase__ ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = {"""num_warmup_steps""": 2, """num_training_steps""": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __UpperCAmelCase : List[str] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"""num_warmup_steps""": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, """num_cycles""": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, """power""": 2.0, """lr_end""": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"""num_warmup_steps""": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): __UpperCAmelCase , __UpperCAmelCase : str = data __UpperCAmelCase : Union[str, Any] = scheduler_func(self.optimizer , **UpperCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) __UpperCAmelCase : List[str] = unwrap_schedule(UpperCAmelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase__ , UpperCAmelCase__ , tol=1E-2 , msg=f'failed for {scheduler_func} in normal scheduler' , ) __UpperCAmelCase : Any = scheduler_func(self.optimizer , **UpperCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase__ ) # wrap to test picklability of the schedule __UpperCAmelCase : List[str] = unwrap_and_save_reload_schedule(UpperCAmelCase__ , self.num_steps ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ , msg=f'failed for {scheduler_func} in save and reload' ) class _A : def __init__( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : str = fn def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.fn(*UpperCAmelCase__ , **UpperCAmelCase__ ) @classmethod def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = list(map(self , scheduler.lr_lambdas ) )
254
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
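The emmental binarizers imported above are external to this script, so a rough torch sketch of the idea behind the magnitude method may help; the function name and the reading of threshold as the fraction of weights kept (following the --threshold help text) are assumptions, not the library's actual code:

import torch


def magnitude_mask(tensor: torch.Tensor, threshold: float) -> torch.Tensor:
    # Keep the top `threshold` fraction of weights by absolute value.
    k = int(threshold * tensor.numel())
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= cutoff).to(tensor.dtype)


weights = torch.randn(4, 4)
mask = magnitude_mask(weights, threshold=0.25)  # keep ~25% largest weights
pruned = weights * mask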
14
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __snake_case = pd.read_csv( '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/''' '''position_salaries.csv''' ) __snake_case = dataset.iloc[:, 1:2].values __snake_case = dataset.iloc[:, 2].values __snake_case = train_test_split(X, y, test_size=0.2, random_state=0) __snake_case = PolynomialFeatures(degree=4) __snake_case = poly_reg.fit_transform(X) __snake_case = LinearRegression() pol_reg.fit(X_poly, y) def A_ ( ): """simple docstring""" plt.scatter(lowercase_, lowercase_, color='''red''' ) plt.plot(lowercase_, pol_reg.predict(poly_reg.fit_transform(lowercase_ ) ), color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
320
_lowerCamelCase : Optional[int] = 65521 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = 1 A__ = 0 for plain_chr in plain_text: A__ = (a + ord(lowercase_ )) % MOD_ADLER A__ = (b + a) % MOD_ADLER return (b << 16) | a
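The function above is Adler-32: a running byte sum a and a running sum-of-sums b, both modulo 65521 (the largest prime below 2^16), packed as (b << 16) | a. A cleaned-up sketch checked against the stdlib implementation and the well-known "Wikipedia" test vector:

import zlib

MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a, b = 1, 0
    for char in plain_text:
        a = (a + ord(char)) % MOD_ADLER  # running sum of bytes
        b = (b + a) % MOD_ADLER          # running sum of the sums
    return (b << 16) | a


assert adler32("Wikipedia") == 0x11E60398 == zlib.adler32(b"Wikipedia")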
14
0
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) A__ = """bert-base-cased""" A__ = """fp16""" A__ = """bf16""" A__ = [FPaa, BFaa] @require_fsdp @require_cuda class a ( UpperCAmelCase__ ): def __lowerCamelCase ( self :List[Any] ): super().setUp() snake_case__ : List[str] = dict( ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,) def __lowerCamelCase ( self :str ): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(UpperCAmelCase__ ): snake_case__ : Optional[int] = self.dist_env.copy() snake_case__ : Optional[Any] = F"""{i + 1}""" snake_case__ : Optional[Any] = strategy with mockenv_context(**UpperCAmelCase__ ): snake_case__ : List[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def __lowerCamelCase ( self :Union[str, Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(UpperCAmelCase__ ): snake_case__ : Union[str, Any] = self.dist_env.copy() snake_case__ : int = prefetch_policy with mockenv_context(**UpperCAmelCase__ ): snake_case__ : Tuple = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def __lowerCamelCase ( self :Tuple ): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(UpperCAmelCase__ ): snake_case__ : Optional[int] = self.dist_env.copy() snake_case__ : Optional[int] = state_dict_type with mockenv_context(**UpperCAmelCase__ ): snake_case__ : int = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def __lowerCamelCase ( self :str ): snake_case__ : Tuple = AutoModel.from_pretrained(UpperCAmelCase__ ) for policy in FSDP_AUTO_WRAP_POLICY: snake_case__ : Optional[int] = self.dist_env.copy() snake_case__ : Dict = policy if policy == "TRANSFORMER_BASED_WRAP": snake_case__ : Union[str, Any] = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": snake_case__ : Any = '''2000''' with mockenv_context(**UpperCAmelCase__ ): snake_case__ : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase__ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) snake_case__ : str = self.dist_env.copy() snake_case__ : List[Any] = '''TRANSFORMER_BASED_WRAP''' snake_case__ : Union[str, Any] = '''T5Layer''' with 
mockenv_context(**UpperCAmelCase__ ): snake_case__ : Optional[int] = FullyShardedDataParallelPlugin() with self.assertRaises(UpperCAmelCase__ ) as cm: fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase__ ) self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) ) snake_case__ : Union[str, Any] = self.dist_env.copy() snake_case__ : int = '''SIZE_BASED_WRAP''' snake_case__ : List[Any] = '''0''' with mockenv_context(**UpperCAmelCase__ ): snake_case__ : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase__ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def __lowerCamelCase ( self :Optional[int] ): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: snake_case__ : List[str] = self.dist_env.copy() snake_case__ : Dict = mp_dtype with mockenv_context(**UpperCAmelCase__ ): snake_case__ : Dict = Accelerator() if mp_dtype == "fp16": snake_case__ : str = torch.floataa elif mp_dtype == "bf16": snake_case__ : str = torch.bfloataa snake_case__ : Any = MixedPrecision(param_dtype=UpperCAmelCase__ ,reduce_dtype=UpperCAmelCase__ ,buffer_dtype=UpperCAmelCase__ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,UpperCAmelCase__ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,UpperCAmelCase__ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(UpperCAmelCase__ ) def __lowerCamelCase ( self :Dict ): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: snake_case__ : Optional[int] = self.dist_env.copy() snake_case__ : Any = str(UpperCAmelCase__ ).lower() with mockenv_context(**UpperCAmelCase__ ): snake_case__ : Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=UpperCAmelCase__ ) ) @require_fsdp @require_multi_gpu @slow class a ( UpperCAmelCase__ ): def __lowerCamelCase ( self :str ): super().setUp() snake_case__ : str = 0.82 snake_case__ : List[str] = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] snake_case__ : List[str] = { '''multi_gpu_fp16''': 3_2_0_0, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0, '''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } snake_case__ : Union[str, Any] = 1_6_0 snake_case__ : Any = 1_6_0 snake_case__ : Tuple = inspect.getfile(accelerate.test_utils ) snake_case__ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def __lowerCamelCase ( self :str ): snake_case__ : List[Any] = os.path.join(self.test_scripts_folder ,'''test_performance.py''' ) snake_case__ : Optional[int] = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: snake_case__ : Dict = cmd.copy() for i, strategy in enumerate(UpperCAmelCase__ ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase__ ,env=os.environ.copy() ) def __lowerCamelCase ( self :List[str] ): snake_case__ : Optional[int] = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' ) snake_case__ : Optional[Any] = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(UpperCAmelCase__ ): snake_case__ : int = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue snake_case__ : Optional[Any] = len(UpperCAmelCase__ ) for state_dict_type in FSDP_STATE_DICT_TYPE: snake_case__ : str = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase__ ,env=os.environ.copy() ) snake_case__ : int = cmd_config[:-1] snake_case__ : Dict = os.path.join(self.tmpdir ,'''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase__ ,env=os.environ.copy() ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ : List[str] = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' ) snake_case__ : List[str] = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): snake_case__ : Tuple = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) 
for i, strategy in enumerate(UpperCAmelCase__ ): if strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase__ ,env=os.environ.copy() )
230
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _lowerCamelCase : Any = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _lowerCamelCase : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _lowerCamelCase : Tuple = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _lowerCamelCase : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple 
docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRContextEncoderTokenizer class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = DPRQuestionEncoderTokenizer _lowerCamelCase : int = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _lowerCamelCase : Dict = r""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ : '''simple docstring''' def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) elif titles is None or texts is None: A__ = titles if texts is None else texts return super().__call__( UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles] A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts] A__ = len(UpperCAmelCase__) A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages assert len(UpperCAmelCase__) == len( UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""" A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids'''] A__ = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for 
encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__) ] } if return_attention_mask is not False: A__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) A__ = attention_mask return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = reader_input['''input_ids'''] A__ , A__ , A__ = reader_output[:3] A__ = len(UpperCAmelCase__) A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__) A__ = [] for doc_id in sorted_docs: A__ = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A__ = sequence_ids.index(self.pad_token_id) else: A__ = len(UpperCAmelCase__) A__ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(UpperCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]: '''simple docstring''' A__ = [] for start_index, start_score in enumerate(UpperCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__) A__ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A__ = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(UpperCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ = DPRReaderTokenizer
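# A minimal usage sketch (added for illustration, not part of the original
# file): pairs the reader tokenizer above with DPRReader; `decode_best_spans`
# applies the span-selection logic defined above. The checkpoint name follows
# the public DPR reader checkpoint and is an assumption here.
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)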
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ): '''simple docstring''' if radian_mode: return [magnitude * cos(lowercase_ ), magnitude * sin(lowercase_ )] return [magnitude * cos(radians(lowercase_ ) ), magnitude * sin(radians(lowercase_ ) )] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 10**-1 ): '''simple docstring''' __lowerCAmelCase = cross(lowercase_ , lowercase_ ) __lowerCAmelCase = sum(lowercase_ ) return abs(lowercase_ ) < eps if __name__ == "__main__": # Test to check if it works A : Optional[Any] = array( [ polar_force(718.4, 1_8_0 - 3_0), polar_force(879.54, 4_5), polar_force(1_0_0, -9_0), ] ) A : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg A : Union[str, Any] = array( [ polar_force(3_0 * 9.81, 1_5), polar_force(2_1_5, 1_8_0 - 4_5), polar_force(2_6_4, 9_0 - 3_0), ] ) A : Dict = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg A : Dict = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]]) A : Optional[Any] = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """
    Composite configuration that stores the configuration of an encoder and a
    decoder model, each instantiated via `AutoConfig` from its serialized dict.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a composite config from an encoder config and a decoder config."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
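# A minimal usage sketch (added for illustration, not part of the original
# file): build a composite config from two sub-configs with the classmethod
# defined above. The tiny BERT sizes are arbitrary assumptions.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
decoder_config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.is_encoder_decoder
assert config.decoder.is_decoder and config.decoder.add_cross_attention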
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCamelCase (UpperCAmelCase__ ,unittest.TestCase ): lowerCamelCase__ : Optional[Any] = BarthezTokenizer lowerCamelCase__ : Tuple = BarthezTokenizerFast lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: super().setUp() SCREAMING_SNAKE_CASE__ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = """<pad>""" SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(UpperCAmelCase__ ) , 1_0_1_1_2_2 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] SCREAMING_SNAKE_CASE__ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] SCREAMING_SNAKE_CASE__ = self.tokenizer( UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE__ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ = self.get_tokenizer() SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = """I was born in 92000, and this is falsé.""" SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 
7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. SCREAMING_SNAKE_CASE__ = [ """Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=UpperCAmelCase__ , )
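# A minimal usage sketch (added for illustration, not part of the original
# file): the encode/decode round trip exercised by the tests above, against
# the same public checkpoint.
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
ids = tokenizer.encode("A long paragraph for summarization.")
print(ids)  # expected [0, 57, 3018, 70307, 91, 2] per the batch test above
print(tokenizer.decode(ids, skip_special_tokens=True))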
def longest_distance(graph: dict[int, list[int]]) -> None:
    """
    Print the number of vertices on the longest path in a DAG, found with
    Kahn's algorithm: repeatedly pop zero in-degree vertices and relax the
    path length of their successors.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from the vertices with no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
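# Worked check (added for illustration, not part of the original file): in the
# adjacency list above the longest chain is 0 -> 2 -> 5 -> 6 -> 7, which visits
# five vertices, so the call prints 5.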
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class _UpperCamelCase ( UpperCAmelCase__ ): def __init__( self :Dict , lowerCamelCase :Optional[int]=0.01 , lowerCamelCase :Optional[Any]=1000 ) -> Optional[int]: UpperCAmelCase__ = p_stop UpperCAmelCase__ = max_length def __iter__( self :List[Any] ) -> List[Any]: UpperCAmelCase__ = 0 UpperCAmelCase__ = False while not stop and count < self.max_length: yield count count += 1 UpperCAmelCase__ = random.random() < self.p_stop class _UpperCamelCase ( unittest.TestCase ): def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Dict , lowerCamelCase :List[str] , lowerCamelCase :List[str]=False , lowerCamelCase :Any=True ) -> Optional[Any]: UpperCAmelCase__ = [ BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) for i in range(2 ) ] UpperCAmelCase__ = [list(UpperCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCAmelCase__ ) for shard in batch_sampler_shards] , [len(UpperCAmelCase__ ) for e in expected] ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self :Dict ) -> Any: UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
UpperCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) # Check the shards when the dataset is very small. UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self :Optional[Any] ) -> Optional[Any]: UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) # Check the shards when the dataset is very small. 
UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) def UpperCAmelCase_ ( self :List[str] ) -> Union[str, Any]: UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. UpperCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is very small. 
UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) def UpperCAmelCase_ ( self :Optional[Any] ) -> Tuple: UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) # Check the shards when the dataset is very small. 
UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) UpperCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) def UpperCAmelCase_ ( self :Union[str, Any] ) -> Any: UpperCAmelCase__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] UpperCAmelCase__ = [BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def UpperCAmelCase_ ( self :int , lowerCamelCase :List[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Tuple , lowerCamelCase :Tuple=False , lowerCamelCase :Optional[Any]=2 , lowerCamelCase :List[str]=False ) -> Optional[Any]: random.seed(UpperCAmelCase__ ) UpperCAmelCase__ = list(UpperCAmelCase__ ) UpperCAmelCase__ = [ IterableDatasetShard( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , drop_last=UpperCAmelCase__ , num_processes=UpperCAmelCase__ , process_index=UpperCAmelCase__ , split_batches=UpperCAmelCase__ , ) for i in range(UpperCAmelCase__ ) ] UpperCAmelCase__ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(UpperCAmelCase__ ) iterable_dataset_lists.append(list(UpperCAmelCase__ ) ) UpperCAmelCase__ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size UpperCAmelCase__ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) self.assertTrue(len(UpperCAmelCase__ ) % shard_batch_size == 0 ) UpperCAmelCase__ = [] for idx in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(UpperCAmelCase__ ) < len(UpperCAmelCase__ ): reference += reference self.assertListEqual(UpperCAmelCase__ , reference[: len(UpperCAmelCase__ )] ) def UpperCAmelCase_ ( self :Any ) -> Optional[Any]: UpperCAmelCase__ = 42 UpperCAmelCase__ = RandomIterableDataset() self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) # Edge case with a very small dataset UpperCAmelCase__ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ ) def UpperCAmelCase_ ( self :Dict ) -> Optional[int]: UpperCAmelCase__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=UpperCAmelCase__ ) UpperCAmelCase__ = SkipBatchSampler(UpperCAmelCase__ , 2 ) self.assertListEqual(list(UpperCAmelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase_ ( self :Optional[int] ) -> List[str]: UpperCAmelCase__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase_ ( self :Optional[Any] ) -> Optional[Any]: UpperCAmelCase__ = DataLoader(list(range(16 ) ) , batch_size=4 ) UpperCAmelCase__ = skip_first_batches(UpperCAmelCase__ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def UpperCAmelCase_ ( self :Optional[int] ) -> List[str]: UpperCAmelCase__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(UpperCAmelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def UpperCAmelCase_ ( self :List[str] ) -> Dict: Accelerator() UpperCAmelCase__ = DataLoaderDispatcher(range(16 ) , batch_size=4 ) 
for idx, _ in enumerate(UpperCAmelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
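# A minimal standalone sketch (added for illustration, not part of the original
# file): shard one BatchSampler across two processes with BatchSamplerShard,
# reproducing the first expectation checked in the tests above.
from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]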
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline _lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase__ = None UpperCAmelCase__ = "utf-8" UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = True # deprecated UpperCAmelCase__ = None # deprecated UpperCAmelCase__ = 10 << 20 # 10MB UpperCAmelCase__ = None class UpperCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' UpperCAmelCase__ = JsonConfig def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''') A__ = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''') if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''') return datasets.DatasetInfo(features=self.config.features) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""") A__ = dl_manager.download_and_extract(self.config.data_files) if isinstance(UpperCAmelCase__ , (str, list, tuple)): A__ = data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})] A__ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [files] A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files})) return splits def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema) return pa_table def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) # We keep only the field we are interested in A__ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase__ , 
(list, tuple)): A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} else: A__ = dataset A__ = pa.Table.from_pydict(UpperCAmelCase__) yield file_idx, self._cast_table(UpperCAmelCase__) # If the file has one json object per line else: with open(UpperCAmelCase__ , '''rb''') as f: A__ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A__ = max(self.config.chunksize // 32 , 16 << 10) A__ = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A__ = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase__) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''') try: while True: try: A__ = paj.read_json( io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__)) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase__ , pa.ArrowInvalid) and "straddling" not in str(UpperCAmelCase__) or block_size > len(UpperCAmelCase__) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""") block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f: A__ = json.load(UpperCAmelCase__) except json.JSONDecodeError: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON try: A__ = set().union(*[row.keys() for row in dataset]) A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys} A__ = pa.Table.from_pydict(UpperCAmelCase__) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None yield file_idx, self._cast_table(UpperCAmelCase__) break else: logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""") raise ValueError( f"""Not able to read records in the JSON file at {file}. """ f"""You should probably indicate the field of the JSON file containing your records. """ f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """ f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__) batch_idx += 1
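# A minimal usage sketch (added for illustration, not part of the original
# file): this builder is what backs `load_dataset("json", ...)`; the `field`
# argument selects a nested list of records, as handled above. The file path
# is an assumption.
from datasets import load_dataset

dataset = load_dataset("json", data_files="data.json", field="data", split="train")
print(dataset.features)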
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' _snake_case = KandinskyVaaPriorPipeline _snake_case = ['''prompt'''] _snake_case = ['''prompt''', '''negative_prompt'''] _snake_case = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] _snake_case = False @property def A__ ( self ) -> Optional[int]: return 32 @property def A__ ( self ) -> Union[str, Any]: return 32 @property def A__ ( self ) -> Optional[Any]: return self.time_input_dim @property def A__ ( self ) -> Tuple: return self.time_input_dim * 4 @property def A__ ( self ) -> Optional[int]: return 100 @property def A__ ( self ) -> Union[str, Any]: __lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def A__ ( self ) -> List[str]: torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(UpperCAmelCase__ ) @property def A__ ( self ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCAmelCase = { """num_attention_heads""": 2, """attention_head_dim""": 12, """embedding_dim""": self.text_embedder_hidden_size, """num_layers""": 1, } __lowerCAmelCase = PriorTransformer(**UpperCAmelCase__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __lowerCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A__ ( self ) -> int: torch.manual_seed(0 ) __lowerCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __lowerCAmelCase = CLIPVisionModelWithProjection(UpperCAmelCase__ ) return model @property def A__ ( self ) -> Tuple: __lowerCAmelCase = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ , do_resize=UpperCAmelCase__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor def A__ ( self ) -> Tuple: __lowerCAmelCase = self.dummy_prior __lowerCAmelCase = self.dummy_image_encoder __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_image_processor __lowerCAmelCase = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase__ , clip_sample_range=10.0 , ) __lowerCAmelCase = { """prior""": prior, 
"""image_encoder""": image_encoder, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """scheduler""": scheduler, """image_processor""": image_processor, } return components def A__ ( self , snake_case_ , snake_case_=0 ) -> str: if str(UpperCAmelCase__ ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(UpperCAmelCase__ ) else: __lowerCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) __lowerCAmelCase = { """prompt""": """horse""", """generator""": generator, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def A__ ( self ) -> Tuple: __lowerCAmelCase = """cpu""" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ ) __lowerCAmelCase = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) ) __lowerCAmelCase = output.image_embeds __lowerCAmelCase = pipe( **self.get_dummy_inputs(UpperCAmelCase__ ) , return_dict=UpperCAmelCase__ , )[0] __lowerCAmelCase = image[0, -10:] __lowerCAmelCase = image_from_tuple[0, -10:] assert image.shape == (1, 32) __lowerCAmelCase = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A__ ( self ) -> Optional[Any]: __lowerCAmelCase = torch_device == """cpu""" __lowerCAmelCase = True __lowerCAmelCase = False self._test_inference_batch_single_identical( test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , test_mean_pixel_difference=UpperCAmelCase__ , ) @skip_mps def A__ ( self ) -> List[str]: __lowerCAmelCase = torch_device == """cpu""" __lowerCAmelCase = False self._test_attention_slicing_forward_pass( test_max_difference=UpperCAmelCase__ , test_mean_pixel_difference=UpperCAmelCase__ , )
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer
    into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text input, as well as audio and text targets.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates the audio and text inputs, as well as their targets, into a padded batch.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # The feature extractor pads log-mel targets rather than raw
                # waveforms, so temporarily swap in the number of mel bins as
                # the feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
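# A minimal usage sketch (added for illustration, not part of the original
# file): the processor routes `text` to the tokenizer and `audio` to the
# feature extractor, as implemented above. The checkpoint name is an
# assumption (the public SpeechT5 TTS checkpoint).
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
print(inputs["input_ids"].shape)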
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]: '''simple docstring''' if not batched: A__ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size['''shortest_edge'''] * h / w) A__ = self.size['''shortest_edge'''] elif w > h: A__ = self.size['''shortest_edge'''] A__ = int(self.size['''shortest_edge'''] * w / h) else: A__ = self.size['''shortest_edge'''] A__ = self.size['''shortest_edge'''] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0] A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = DeformableDetrImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale''')) 
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
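# --- Illustrative sketch (not part of the test file above) ---
# The shape checks above rely on the tester's `get_expected_values` helper. A
# minimal sketch of the DETR-style resize rule it presumably implements for a
# {"shortest_edge": s, "longest_edge": l} size dict; the function name here is
# hypothetical, for illustration only:


def expected_resized_size(height: int, width: int, shortest_edge: int, longest_edge: int) -> tuple:
    # Scale so the short side reaches `shortest_edge`, then shrink further if
    # the long side would exceed `longest_edge`.
    scale = shortest_edge / min(height, width)
    if max(height, width) * scale > longest_edge:
        scale = longest_edge / max(height, width)
    return int(height * scale), int(width * scale)


# e.g. the 480x640 COCO fixture with shortest_edge=800, longest_edge=1333
# resizes to 800x1066, matching the pixel_values shape checked above.
assert expected_resized_size(480, 640, 800, 1333) == (800, 1066)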
"""simple docstring""" import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration _UpperCamelCase : Any = 5_0_0_0_0_0 _UpperCamelCase : List[Any] = os.path.split(__file__) _UpperCamelCase : Tuple = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def _SCREAMING_SNAKE_CASE ( __snake_case : int , **__snake_case : str ): '''simple docstring''' lowercase = dataset.map(**lowercase_ ) @get_duration def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , **__snake_case : int ): '''simple docstring''' lowercase = dataset.filter(**lowercase_ ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowercase = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) lowercase = generate_example_dataset( os.path.join(lowercase_ , 'dataset.arrow' ) , lowercase_ , num_examples=lowercase_ ) lowercase = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase_ ) def tokenize(__snake_case : Dict ): return tokenizer(examples['text'] ) lowercase = map(lowercase_ ) lowercase = map(lowercase_ , batched=lowercase_ ) lowercase = map(lowercase_ , function=lambda __snake_case : None , batched=lowercase_ ) with dataset.formatted_as(type='numpy' ): lowercase = map(lowercase_ , function=lambda __snake_case : None , batched=lowercase_ ) with dataset.formatted_as(type='pandas' ): lowercase = map(lowercase_ , function=lambda __snake_case : None , batched=lowercase_ ) with dataset.formatted_as(type='torch' , columns='numbers' ): lowercase = map(lowercase_ , function=lambda __snake_case : None , batched=lowercase_ ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): lowercase = map(lowercase_ , function=lambda __snake_case : None , batched=lowercase_ ) lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ ) lowercase = filter(lowercase_ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase_ , 'wb' ) as f: f.write(json.dumps(lowercase_ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the Euclidean distance between two vectors using numpy.
    >>> euclidean_distance((0, 0), (2, 2))
    2.8284271247461903
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the Euclidean distance between two vectors without numpy.
    >>> euclidean_distance_no_np((0, 0), (2, 2))
    2.8284271247461903
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark the pure-Python and numpy implementations."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
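# --- Illustrative sketch (not part of the module above) ---
# Quick consistency check between the two implementations: both compute
# d(a, b) = sqrt(sum_i (a_i - b_i)^2) and should agree to floating-point
# precision. Assumes the module's functions are already in scope.

import numpy as np

a, b = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
assert np.isclose(euclidean_distance(a, b), euclidean_distance_no_np(a, b))
print(euclidean_distance(a, b))  # 5.196152422706632 (= sqrt(27))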