"""PoolFormer model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a PoolFormer model."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
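# Illustrative usage sketch: instantiating the config via the public `transformers`
# namespace (assumes a release that ships PoolFormer).
from transformers import PoolFormerConfig

config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
print(config.model_type, config.num_encoder_blocks)  # poolformer 4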
"""Calculate one of resistance, reactance, or impedance when the other two are known."""
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance formula Z = sqrt(R**2 + X**2); the argument passed as 0
    is the one being solved for.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
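# Quick check of all three branches using a 3-4-5 impedance triangle
# (values chosen for illustration).
print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}
print(electrical_impedance(3, 0, 5))  # {'reactance': 4.0}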
"""Tokenization classes for XLNet model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Re-split pieces like "9," so the digits and the comma become separate tokens
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
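# Illustrative usage sketch (requires the `sentencepiece` package and downloads
# the pretrained vocab from the hub).
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # ['▁Hello', '▁world', '<sep>', '<cls>']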
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
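# Standalone sanity check (same data as the demo above); the sort is in-place
# and runs in O(n + range) time with O(range) extra space.
data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
assert data == [2, 3, 4, 6, 7, 8, 8]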
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCAmelCase_ = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
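# Mirrors Example 1 from the docstring above (requires `datasets` and `scikit-learn`;
# note that `load_metric` moved to the separate `evaluate` library in newer releases).
import datasets

f1_metric = datasets.load_metric("f1")
results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
print(results)  # {'f1': 0.5}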
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
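# Illustrative denoising loop driven by the scheduler alone; the zero residual
# stands in for a real noise-prediction model, so the output is meaningless --
# the point is the set_timesteps/step API shape.
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample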
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = tempfile.mkdtemp()
UpperCAmelCase_ : Tuple = BlipImageProcessor()
UpperCAmelCase_ : Dict = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
UpperCAmelCase_ : Tuple = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
UpperCAmelCase_ : List[str] = InstructBlipProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).tokenizer
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).qformer_tokenizer
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ : Tuple = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : List[str] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCAmelCase_ : Union[str, Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.qformer_tokenizer , _SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_qformer_tokenizer()
UpperCAmelCase_ : int = InstructBlipProcessor(
tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Any = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_qformer_tokenizer()
UpperCAmelCase_ : Dict = InstructBlipProcessor(
tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = "lower newer"
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = qformer_tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_qformer_tokenizer()
UpperCAmelCase_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = "lower newer"
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_qformer_tokenizer()
UpperCAmelCase_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_qformer_tokenizer()
UpperCAmelCase_ : Any = InstructBlipProcessor(
tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : str = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 61 |
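# Illustrative end-to-end sketch with a published checkpoint (the checkpoint name
# is an assumption; any InstructBLIP repo on the hub should behave the same).
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
inputs = processor(images=Image.new("RGB", (224, 224)), text="Describe the image.", return_tensors="pt")
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values']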
"""Segment tree with lazy propagation supporting range assignment and range-max queries."""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to positions [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
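# Small standalone check of the lazy range-assign + range-max behaviour
# (values chosen for illustration); both operations are O(log n).
st = SegmentTree(5)
st.build(1, 1, 5, [5, 1, 4, 2, 3])
assert st.query(1, 1, 5, 2, 4) == 4  # max of [1, 4, 2]
st.update(1, 1, 5, 2, 3, 9)          # assign 9 on positions 2..3
assert st.query(1, 1, 5, 1, 5) == 9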
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
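# Minimal forward-pass sketch with random inputs (dimensions illustrative);
# note the 3 * T sequence length: one token each for return, state and action
# per timestep.
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=64)
model = DecisionTransformerModel(config).eval()
batch, steps = 2, 5
out = model(
    states=torch.randn(batch, steps, 17),
    actions=torch.randn(batch, steps, 6),
    rewards=torch.randn(batch, steps, 1),
    returns_to_go=torch.randn(batch, steps, 1),
    timesteps=torch.arange(steps).repeat(batch, 1),
    attention_mask=torch.ones(batch, steps, dtype=torch.long),
)
print(out.last_hidden_state.shape)  # torch.Size([2, 15, 64])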
"""
Find the Pythagorean triplet a, b, c with a + b + c = n that maximizes the
product a * b * c; returns -1 if no such triplet exists.
"""


def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
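# Quick checks: the 3-4-5 triangle scaled to perimeter 12, and the classic
# Project Euler answer for n = 1000 (the 200-375-425 triplet).
assert solution(12) == 60  # 3 * 4 * 5
assert solution(1000) == 31875000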
"""simple docstring"""
import math
from datetime import datetime, timedelta
def __UpperCAmelCase ( UpperCAmelCase_ : Dict ) -> datetime:
'''simple docstring'''
__snake_case : int = year % 19
__snake_case : str = year % 4
__snake_case : int = year % 7
__snake_case : Optional[Any] = math.floor(year / 1_00 )
__snake_case : Dict = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__snake_case : str = leap_day_inhibits / 4
__snake_case : Dict = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__snake_case : int = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__snake_case : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__snake_case : List[str] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCamelCase , 4 , 18 )
else:
return datetime(__UpperCamelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
_a : List[str]= "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 172 |
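# Spot checks against known Gregorian Easter dates:
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2021) == datetime(2021, 4, 4)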
"""
Implementation of the SHA-256 hash function in pure Python, with a small CLI to
hash a string or the contents of a file.
"""
import argparse
import struct
import unittest


class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants
        # fmt: off
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        # fmt: on

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes, appending its bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
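# Usage sketch: hash arbitrary bytes and cross-check against hashlib.
import hashlib

payload = b"The quick brown fox jumps over the lazy dog"
assert SHA256(payload).hash == hashlib.sha256(payload).hexdigest()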
"""Tokenization classes for CANINE."""
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (its integer codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). Special codepoints are
        converted to their human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
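# Illustrative usage: CANINE tokenizes at the character level, so ids are just
# codepoints wrapped in [CLS]/[SEP] (checkpoint name taken from the map above).
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("nielsr/canine-s")
print(tokenizer("hi")["input_ids"])  # [57344, 104, 105, 57345] == [CLS] 'h' 'i' [SEP]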
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: sentencepiece_model.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    b'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
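# Hedged usage sketch: once this generated module is importable (module name
# `sentencepiece_model_pb2` assumed), a serialized SentencePiece model parses
# like any other protobuf message. The file path is illustrative.
#
#   from sentencepiece_model_pb2 import ModelProto
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))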
| 321 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def _lowercase ( vocab_file ) -> Dict[str, int]:
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _a , _a="[SEP]" , _a="[SEP]" , _a="[SEP]" , _a="[UNK]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
SCREAMING_SNAKE_CASE__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE__ : int = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE__ : List[Any] = f'''[unused{i}]'''
SCREAMING_SNAKE_CASE__ : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ : Dict = 12
SCREAMING_SNAKE_CASE__ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_SCREAMING_SNAKE_CASE )
def __getstate__( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Any = None
return state
def __setstate__( self , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _a , _a = None , _a = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ) -> Any:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , _a ) -> str:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ : List[str] = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """""".join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
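# Minimal check of the vocab loader defined at the top of this file: one
# token per line maps to its zero-based line index.
import tempfile
with tempfile.NamedTemporaryFile("""w""" , suffix=""".vocab""" , delete=False , encoding="""utf-8""" ) as _f:
    _f.write("""[PAD]\n[UNK]\n▁hello\n""" )
assert dict(_lowercase(_f.name ) ) == {"""[PAD]""": 0, """[UNK]""": 1, """▁hello""": 2}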
| 132 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles , kelvin , volume )-> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles , kelvin , pressure )-> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""" )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
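# Quick numeric check of the helper above (SI units assumed: mol, K, m^3 -> Pa):
# 1 mol of ideal gas at 273.15 K in 0.0224 m^3 is close to one atmosphere.
assert abs(pressure_of_gas_system(1.0 , 273.15 , 0.0224 ) - 1.014E5 ) < 1E3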
| 321 | 0 |
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool):
    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVision2Seq
    inputs = ["""image"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self , image ):
        '''simple docstring'''
        return self.pre_processor(images=image , return_tensors='''pt''' )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
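# Hedged usage sketch (downloads the BLIP checkpoint on first use; needs PIL,
# torch and network access; the image path is illustrative):
#
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   print(captioner(Image.open("photo.jpg")))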
| 182 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('s3fs') is not None
if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path )-> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem( fs )-> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs , src , dst ):
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( )-> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
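# Quick check of the URI helper above: the protocol prefix is stripped while
# plain local paths pass through unchanged.
assert extract_path_from_uri("""s3://my-bucket/dataset""" ) == "my-bucket/dataset"
assert extract_path_from_uri("""/local/path/dataset""" ) == "/local/path/dataset"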
| 321 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute( self , predictions , references ):
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 208 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
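# The _LazyModule assignment above swaps this package module for a proxy that
# imports submodules only on first attribute access. A minimal standalone
# sketch of the same idea (not HF's implementation):
#
#   import importlib
#   class _Lazy:
#       def __init__(self, name):
#           self._name, self._mod = name, None
#       def __getattr__(self, attr):
#           if self._mod is None:
#               self._mod = importlib.import_module(self._name)
#           return getattr(self._mod, attr)
#
#   json_lazy = _Lazy("json")
#   json_lazy.dumps({"a": 1})  # "json" is imported only at this point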
| 321 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig( PretrainedConfig):
    model_type = 'instructblip_vision_model'
def __init__( self :int , _A :List[str]=1_408 , _A :Union[str, Any]=6_144 , _A :Any=39 , _A :Optional[int]=16 , _A :List[Any]=224 , _A :List[Any]=14 , _A :str="gelu" , _A :Tuple=1E-6 , _A :List[str]=0.0 , _A :str=1E-10 , _A :List[Any]=True , **_A :Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
__A = hidden_size
__A = intermediate_size
__A = num_hidden_layers
__A = num_attention_heads
__A = patch_size
__A = image_size
__A = initializer_range
__A = attention_dropout
__A = layer_norm_eps
__A = hidden_act
__A = qkv_bias
@classmethod
def lowercase_ ( cls :List[Any] , _A :Dict , **_A :str ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__A , __A = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class InstructBlipQFormerConfig( PretrainedConfig):
    model_type = 'instructblip_qformer'
def __init__( self :Union[str, Any] , _A :List[str]=30_522 , _A :str=768 , _A :Union[str, Any]=12 , _A :Union[str, Any]=12 , _A :Any=3_072 , _A :Dict="gelu" , _A :int=0.1 , _A :Dict=0.1 , _A :Union[str, Any]=512 , _A :Union[str, Any]=0.02 , _A :List[str]=1E-12 , _A :int=0 , _A :Union[str, Any]="absolute" , _A :Any=2 , _A :List[Any]=1_408 , **_A :Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = hidden_act
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = initializer_range
__A = layer_norm_eps
__A = position_embedding_type
__A = cross_attention_frequency
__A = encoder_hidden_size
@classmethod
def lowercase_ ( cls :Optional[int] , _A :List[str] , **_A :int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__A , __A = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__A = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class InstructBlipConfig( PretrainedConfig):
    model_type = 'instructblip'
    is_composition = True
def __init__( self :List[str] , _A :str=None , _A :str=None , _A :List[str]=None , _A :Optional[Any]=32 , **_A :Any ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
if vision_config is None:
__A = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__A = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__A = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__A = InstructBlipVisionConfig(**_SCREAMING_SNAKE_CASE )
__A = InstructBlipQFormerConfig(**_SCREAMING_SNAKE_CASE )
__A = text_config['model_type'] if 'model_type' in text_config else 'opt'
__A = CONFIG_MAPPING[text_model_type](**_SCREAMING_SNAKE_CASE )
__A = self.text_config.tie_word_embeddings
__A = self.text_config.is_encoder_decoder
__A = num_query_tokens
__A = self.vision_config.hidden_size
__A = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__A = 1.0
__A = 0.02
@classmethod
def lowercase_ ( cls :List[Any] , _A :List[str] , _A :Any , _A :int , **_A :Optional[Any] , ) -> Union[str, Any]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_SCREAMING_SNAKE_CASE , )
def lowercase_ ( self :List[Any] ) -> List[str]:
'''simple docstring'''
__A = copy.deepcopy(self.__dict__ )
__A = self.vision_config.to_dict()
__A = self.qformer_config.to_dict()
__A = self.text_config.to_dict()
__A = self.__class__.model_type
return output
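# Hedged usage sketch: the three sub-configs above compose into the top-level
# config (plain config objects with default values, no weights involved):
#
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   cfg = InstructBlipConfig(
#       vision_config=vision.to_dict(),
#       qformer_config=qformer.to_dict(),
#       text_config={"model_type": "opt"},
#   )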
| 161 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
                """others.""" )
        # Only add this once
        new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    # Sort
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(F"{doc_list} has two 'overview' docs which is not allowed." )
    overview_doc.extend(new_doc )
    return overview_doc
def check_scheduler_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["""sections"""]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["""sections"""] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(PATH_TO_TOC , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def check_pipeline_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["""sections"""]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["""section"""]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["""section"""] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["""sections"""] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(PATH_TO_TOC , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
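# Illustration of clean_doc_toc above on toy entries: duplicate locals are
# merged and the "Overview" page is pinned to the front.
#
#   toy = [
#       {"local": "overview", "title": "Overview"},
#       {"local": "b_doc", "title": "B doc"},
#       {"local": "a_doc", "title": "A doc"},
#       {"local": "a_doc", "title": "A doc"},
#   ]
#   clean_doc_toc(toy)
#   # -> Overview first, then "A doc" (deduplicated) and "B doc"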
| 321 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__snake_case = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    '''simple docstring'''
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
    return _map_with_joblib(function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
def _map_with_multiprocessing_pool( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    '''simple docstring'''
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            f'''Error dividing inputs iterable among processes. '''
            f'''Total number of objects {len(iterable )}, '''
            f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
    logger.info(
        f'''Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(f'''Finished {num_proc} processes''' )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'''Unpacked {len(mapped )} objects''' )
    return mapped
def _map_with_joblib( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    '''simple docstring'''
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend( backend_name ):
    '''simple docstring'''
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 97 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ):
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"{class_data_dir}/images" , exist_ok=True )
    if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="""downloading real regularization images""" , total=num_class_images )
    with open(F"{class_data_dir}/caption.txt" , """w""" ) as f1, open(F"{class_data_dir}/urls.txt" , """w""" ) as f2, open(
        F"{class_data_dir}/images.txt" , """w""" ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["""url"""] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )  # validates that the payload decodes as an image
                    with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
                        f.write(img.content )
                    f1.write(images["""caption"""] + """\n""" )
                    f2.write(images["""url"""] + """\n""" )
                    f3.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("""""" , add_help=False )
    parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=True , type=str )
    parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=True , type=str )
    parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
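# Hedged CLI sketch (script name and values are illustrative; requires the
# `clip-retrieval` client and network access):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog --num_class_images 200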
| 321 | 0 |
def solution( power = 1000 ):
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
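# Worked check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26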
| 287 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"""help""": """The column name of the images in the files."""} )
    train_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the training data."""} )
    validation_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ) -> None:
        data_files = {}
        if self.train_dir is not None:
            data_files["""train"""] = self.train_dir
        if self.validation_dir is not None:
            data_files["""val"""] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."""
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    image_processor_name: str = field(default=None , metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
    norm_pix_loss: bool = field(
        default=True , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class CustomTrainingArguments( TrainingArguments ):
    base_learning_rate: float = field(
        default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn( examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    return {"pixel_values": pixel_values}
def lowercase__ ( )-> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
    if model_args.config_overrides is not None:
        logger.info(F"Overriding config: {model_args.config_overrides}" )
        config.update_from_string(model_args.config_overrides )
        logger.info(F"New config: {config}" )
    # adapt config
    config.update(
        {
            """mask_ratio""": model_args.mask_ratio,
            """norm_pix_loss""": model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
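# Hedged launch sketch (flag names follow the argument dataclasses above;
# dataset and paths are illustrative):
#
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 --overwrite_output_dir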
| 321 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
'''simple docstring'''
    def __init__( self ,parent ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = TFEsmModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs ,encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids ,attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=EsmConfig ,hidden_size=37 )
    def test_config( self ) -> Optional[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings( self ) -> Optional[int]:
"""simple docstring"""
pass
    def test_model_common_attributes( self ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name ,dict )
                for k, v in name.items():
                    assert isinstance(v ,tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
@slow
    def test_inference_masked_lm( self ) -> int:
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.92_1518, -10.589_814, -6.467_1307],
[-6.396_7156, -13.911_377, -1.121_1915],
[-7.78_1247, -13.951_557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        """simple docstring"""
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 16 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
    _optional_components = ["""melgan"""]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ) -> None:
        """simple docstring"""
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ) -> Any:
        """simple docstring"""
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ) -> Optional[Any]:
        """simple docstring"""
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ) -> Optional[Any]:
        """simple docstring"""
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ) -> str:
        """simple docstring"""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self , input_tokens , generator = None , num_inference_steps = 100 , return_dict = True , output_type = "numpy" , callback = None , callback_steps = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(callback_steps )}." )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info("""Generated segment""" , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
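# Illustrative sketch (not part of the original file): scale_features and
# scale_to_features above are a plain linear rescale between the MelGAN log-mel
# range [log(1e-5), 4.0] and the diffusion model's working range [-1, 1]; the
# round-trip below reuses the math/torch imports from the top of this file.
_lo, _hi = math.log(1e-5), 4.0
_feats = torch.tensor([_lo, 0.0, _hi])
_zero_one = (_feats - _lo) / (_hi - _lo)
_scaled = _zero_one * 2.0 - 1.0                       # output_range=[-1.0, 1.0]
_restored = (_scaled + 1.0) / 2.0 * (_hi - _lo) + _lo
assert torch.allclose(_feats, _restored)              # the mapping is exactly invertible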
| 321 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir, metric ):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback( metric, patience ):
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback ):
    '''simple docstring'''
    def on_batch_end( self , trainer , pl_module ):
        """simple docstring"""
        lrs = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        """simple docstring"""
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 61 |
'''simple docstring'''
def solution( n = 4000000 )-> int:
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f'{solution() = }')
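# Illustrative alternative (not in the original): every third Fibonacci number is
# even, so the even terms can be generated directly via E(k) = 4*E(k-1) + E(k-2),
# avoiding the parity test in solution() above.
def solution_direct(n: int = 4000000) -> int:
    prev, cur = 2, 8  # the first two even Fibonacci numbers
    total = 2 if prev <= n else 0
    while cur <= n:
        total += cur
        prev, cur = cur, 4 * cur + prev
    return total
assert solution_direct() == solution()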
| 321 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCamelCase : List[Any] = 4_5
lowerCamelCase : str = 1_5_8_1
lowerCamelCase : Dict = 1_5_1_7
lowerCamelCase : Dict = 1_5_7_0
lowerCamelCase : Optional[int] = 1_5_8_4
lowerCamelCase : Tuple = 1_7_9_3
lowerCamelCase : Union[str, Any] = 1_7_9_5
lowerCamelCase : Union[str, Any] = 1_9_1_6
lowerCamelCase : Optional[int] = 1_8_6_4
lowerCamelCase : Tuple = 1_9_0_5
lowerCamelCase : List[str] = 1_9_1_9
lowerCamelCase : List[Any] = 2_4_2_9
lowerCamelCase : str = 2_2_0_8
lowerCamelCase : Dict = 2_4_1_8
lowerCamelCase : str = 2_3_2_3
lowerCamelCase : str = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 47 |
'''simple docstring'''
def valid_coloring( neighbours , colored_vertices , color )-> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index )-> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors )-> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
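# Illustrative usage sketch (example graph assumed, not from the original file):
# adjacency-matrix input with 3 colors; a triangle needs all three.
example_graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
print(color(example_graph , 3 ))  # e.g. [0, 1, 2]; [] would mean no valid coloring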
| 321 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats( url : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    '''simple docstring'''
    xpath_str = '//div[@class = \"maincounter-number\"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 172 |
'''simple docstring'''
def solution( n = 2000000 )-> int:
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
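# Illustrative sanity check (not in the original): primes below 10 are 2, 3, 5, 7,
# so the sieve-based solution() above should report 17 for n=10.
assert solution(10 ) == 17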
| 321 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n ) -> int:
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution( ) -> int:
    """simple docstring"""
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 ,limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f'''{solution() = }''')
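# Illustrative check of the search bound (not in the original): an n-digit number
# has digit-factorial sum at most n * 9!, and 8 * 9! = 2_903_040 has only 7 digits,
# so no 8-digit number can equal its digit-factorial sum. Hence the 7 * 9! + 1
# upper limit used in solution() above.
assert len(str(7 * factorial(9 ) ) ) == 7
assert sum_of_digit_factorial(145 ) == 145  # 1! + 4! + 5! = 145, a known curious number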
| 235 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( )-> None:
    def do_benchmark(number ) -> None:
        setup = """import __main__ as z"""
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number ) = }" )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }" )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
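# Illustrative trace (not in the original): Brian Kernighan's trick clears the
# lowest set bit each iteration, so 25 = 0b11001 takes exactly 3 iterations:
# 0b11001 -> 0b11000 -> 0b10000 -> 0b00000.
assert get_set_bits_count_using_brian_kernighans_algorithm(25 ) == 3
assert get_set_bits_count_using_modulo_operator(25 ) == 3
assert bin(25 ).count("1" ) == 3  # cross-check with the standard-library route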
| 321 | 0 |
"""simple docstring"""
def valid_coloring( neighbours , colored_vertices , color ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
| 132 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    model_type = """M-CLIP"""
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , txt , att ):
        '''simple docstring'''
        embs = self.transformer(input_ids=txt , attention_mask=att )[0]
        embs2 = (embs * att.unsqueeze(2 )).sum(dim=1 ) / att.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
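# Illustrative usage sketch (checkpoint name and tokenizer choice are assumptions,
# not from the original file): the forward pass returns mean-pooled, linearly
# projected text embeddings alongside the raw token embeddings.
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
# model = MultilingualCLIP.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
# batch = tok(['a photo of a dog'], return_tensors='pt')
# projected, token_embs = model(batch['input_ids'], batch['attention_mask'])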
| 182 |
'''simple docstring'''
import math
def malus_law( initial_intensity , angle )-> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
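# Illustrative worked example (not in the original): at 60 degrees,
# cos^2(60°) = 0.25, so 100 units of incident intensity transmit 25.
assert abs(malus_law(100 , 60 ) - 25.0 ) < 1e-9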
| 321 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["""input_values""", """padding_mask"""]
    def __init__( self , feature_size : int = 1 , sampling_rate : int = 2_4000 , padding_value : float = 0.0 , chunk_length_s : float = None , overlap : float = None , **kwargs , ) -> Tuple:
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Union[str, Any] , _a : Tuple , _a : Dict = None , _a : Tuple = False , _a : Union[str, Any] = None , _a : List[Any] = None , _a : Dict = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
__lowerCamelCase : int = True
__lowerCamelCase : Any = bool(
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__lowerCamelCase : List[str] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
__lowerCamelCase : List[str] = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__lowerCamelCase : List[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCamelCase : Tuple = [np.asarray(_SCREAMING_SNAKE_CASE ).T]
# verify inputs are valid
for idx, example in enumerate(_SCREAMING_SNAKE_CASE ):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' )
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : List[str] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__lowerCamelCase : Optional[Any] = min(array.shape[0] for array in raw_audio )
__lowerCamelCase : Any = int(np.floor(max_length / self.chunk_stride ) )
__lowerCamelCase : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__lowerCamelCase : Optional[int] = max(array.shape[0] for array in raw_audio )
__lowerCamelCase : List[Any] = int(np.ceil(max_length / self.chunk_stride ) )
__lowerCamelCase : int = (nb_step - 1) * self.chunk_stride + self.chunk_length
__lowerCamelCase : str = 'max_length'
else:
__lowerCamelCase : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
__lowerCamelCase : Optional[Any] = self.pad(
_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
if padding:
__lowerCamelCase : Optional[int] = padded_inputs.pop('attention_mask' )
__lowerCamelCase : Optional[Any] = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
__lowerCamelCase : Any = example[..., None]
input_values.append(example.T )
__lowerCamelCase : int = input_values
if return_tensors is not None:
__lowerCamelCase : str = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
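# Illustrative usage sketch (values assumed, not from the original file): one
# second of mono audio run through the feature extractor defined above.
_fe = EncodecFeatureExtractor(feature_size=1 , sampling_rate=2_4000 , chunk_length_s=1.0 , overlap=0.01 )
_audio = np.zeros(2_4000 , dtype=np.float32 )
_out = _fe(_audio , sampling_rate=2_4000 , return_tensors='np' )
assert _out['input_values'].shape[0] == 1  # (batch, channels, padded_length)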
| 208 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
UpperCamelCase = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE )
return score
| 321 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 |
'''simple docstring'''
def least_divisible_repunit( divisor )-> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution( limit = 1000000 )-> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
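# Illustrative worked example (not in the original): 111111 = 7 * 15873 and no
# shorter repunit is divisible by 7, so least_divisible_repunit(7) should be 6.
assert least_divisible_repunit(7 ) == 6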
| 321 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = ['input_ids', 'attention_mask']
_a = []
_a = []
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
UpperCamelCase__ :Dict = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase__ :Union[str, Any] = legacy_behaviour
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ :str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase__ :Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :int = len(self.sp_model )
UpperCamelCase__ :Tuple = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_SCREAMING_SNAKE_CASE )
}
UpperCamelCase__ :Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase__ :List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase__ :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase__ :str = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase__ :Union[str, Any] = src_lang if src_lang is not None else '''eng_Latn'''
UpperCamelCase__ :Tuple = self.lang_code_to_id[self._src_lang]
UpperCamelCase__ :List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ :List[Any] = {}
UpperCamelCase__ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :Dict = [1] * len(self.prefix_tokens )
UpperCamelCase__ :str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(_SCREAMING_SNAKE_CASE )) + ([0] * len(_SCREAMING_SNAKE_CASE )) + suffix_ones
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Any = [self.sep_token_id]
UpperCamelCase__ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ :Dict = src_lang
UpperCamelCase__ :str = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :Optional[Any] = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ :List[str] = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ :Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase__ :Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = "eng_Latn" , UpperCamelCase_ = None , UpperCamelCase_ = "fra_Latn" , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = src_lang
UpperCamelCase__ :int = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ):
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 97 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance , reactance , impedance )-> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
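# Illustrative usage sketch (values assumed, not from the original): a 3-ohm
# resistance with 4 ohms of reactance gives the classic 3-4-5 impedance triangle.
assert electrical_impedance(3 , 4 , 0 ) == {"impedance": 5.0}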
| 321 | 0 |
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
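# Minimal usage sketch (the layer/device counts below are made up for
# illustration):
#   device_map = get_device_map(n_layers=4, devices=[0, 1])
#   # -> {0: [0, 1], 1: [2, 3]}
#   assert_device_map(device_map, num_blocks=4)  # raises ValueError on bad maps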
"""Pigeonhole sort: an O(n + range) sorting algorithm for small integer keys."""


# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(n) for n in a]))
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['MobileNetV2FeatureExtractor']
lowerCAmelCase_ = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = XLMTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase_ : Any = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : Any = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = "lower newer"
UpperCAmelCase_ : Optional[Any] = "lower newer"
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase_ : Optional[int] = "lower"
UpperCAmelCase_ : Union[str, Any] = ["low", "er</w>"]
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tokens + ["<unk>"]
UpperCAmelCase_ : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
"""Segment tree with lazy propagation: range-assign updates and range-max queries in O(log n)."""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element in [a, b], pushing pending updates lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of the elements in [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
"""Sum of all primes below n, computed with a Sieve of Eratosthenes."""


def solution(n: int = 2000000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
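# Sanity check (well-known values): the primes below 10 are 2, 3, 5 and 7,
# so solution(10) == 17; similarly solution(1000) == 76127.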
if __name__ == "__main__":
print(f'''{solution() = }''')
"""Find the Pythagorean triplet (a, b, c) with a + b + c = n and return a * b * c."""


def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
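# Worked example: for n = 12 the only triplet is (3, 4, 5), since 3 + 4 + 5 = 12
# and 3**2 + 4**2 = 5**2, so solution(12) == 60.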
if __name__ == "__main__":
print(f'{solution() = }')
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__snake_case : List[Any] = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
__snake_case : str = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
__snake_case : Dict = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
__snake_case : List[str] = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
__snake_case : int = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
__snake_case : Dict = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
__snake_case : str = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
__snake_case : Any = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
__snake_case : Union[str, Any] = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
__snake_case : Optional[int] = key.replace('image_encoder.module' , 'flava.image_model' )
__snake_case : Union[str, Any] = key.replace('text_encoder.module' , 'flava.text_model' )
__snake_case : Any = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
__snake_case : Dict = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
__snake_case : int = key.replace('text_projection' , 'flava.text_projection' )
__snake_case : List[Any] = key.replace('image_projection' , 'flava.image_projection' )
__snake_case : Tuple = value.float()
for key, value in codebook_state_dict.items():
__snake_case : str = value
return upgrade
@torch.no_grad()
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None ) -> Any:
'''simple docstring'''
if config_path is not None:
__snake_case : List[Any] = FlavaConfig.from_pretrained(__UpperCamelCase )
else:
__snake_case : Union[str, Any] = FlavaConfig()
__snake_case : Tuple = FlavaForPreTraining(__UpperCamelCase ).eval()
__snake_case : Optional[Any] = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
__snake_case : List[str] = torch.load(__UpperCamelCase , map_location='cpu' )
else:
__snake_case : Tuple = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )
__snake_case : int = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase )
hf_model.load_state_dict(__UpperCamelCase )
__snake_case : Optional[Any] = hf_model.state_dict()
__snake_case : str = count_parameters(__UpperCamelCase )
__snake_case : Dict = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_a : Optional[int]= argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_a : int= parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
"""Pure-Python implementation of the SHA-256 hash function (FIPS 180-4)."""
import argparse
import struct
import unittest


class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data and append the 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
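    # For example, a 3-byte message gains one 0x80 byte and 52 zero bytes so that
    # 3 + 1 + 52 + 8 == 64, and the trailing 8 bytes encode the bit length (24)
    # as a big-endian unsigned integer.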
    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
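    # e.g. ror(0x00000001, 1) == 0x80000000: the low bit wraps around to the top.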
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers (assignment targets below follow the upstream conversion script)
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    b'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :Optional[int] = logging.get_logger(__name__)
a :int = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = """falcon"""
_SCREAMING_SNAKE_CASE :str = ["""past_key_values"""]
def __init__( self , _a=65_024 , _a=4_544 , _a=32 , _a=71 , _a=1E-5 , _a=0.02 , _a=True , _a=0.0 , _a=0.0 , _a=None , _a=False , _a=False , _a=True , _a=True , _a=False , _a=11 , _a=11 , **_a , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("""n_embed""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : int = hidden_size if n_embed is None else n_embed
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = use_cache
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout
SCREAMING_SNAKE_CASE__ : Tuple = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
SCREAMING_SNAKE_CASE__ : Any = alibi
SCREAMING_SNAKE_CASE__ : str = new_decoder_architecture
SCREAMING_SNAKE_CASE__ : Tuple = multi_query # Ignored when new_decoder_architecture is True
SCREAMING_SNAKE_CASE__ : str = parallel_attn
SCREAMING_SNAKE_CASE__ : str = bias
super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _a ( self ) -> Dict:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def _a ( self ) -> Dict:
"""simple docstring"""
return not self.alibi
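# Minimal usage sketch (not part of the original file): build the default
# 7B-style configuration and read the derived properties.
#   config = FalconConfig()
#   config.head_dim  # hidden_size // num_attention_heads
#   config.rotary    # True whenever alibi is disabled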
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 8.31_44_62 # Unit - J mol-1 K-1
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
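# Quick sanity check (illustrative numbers): 1 mol at 300 K in 1 m^3 exerts
# P = nRT / V = 1 * 8.314462 * 300 / 1, i.e. about 2494.34 Pa:
#   pressure_of_gas_system(1, 300, 1)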
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
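# How the lazy pattern behaves in practice (illustrative, not part of the file):
# importing this package is cheap, and a statement such as
#   from transformers.models.xlm_roberta_xl import XLMRobertaXLConfig
# only loads the heavy torch-backed submodule on first attribute access.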
import importlib
import shutil
import threading
import warnings

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a remote dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop/thread references so forked processes start clean."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
'''Tests for the zero-shot classification pipeline.'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health']
        )
        return classifier, ['Who are you voting for in 2020?', 'My stomach hurts.']

    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}'
        )
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template='Not formatting template',
            )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?',
                candidate_labels='politics',
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='pt',
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science']
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='pt',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification',
            model='sshleifer/tiny-distilbert-base-cased-distilled-squad',
            framework='tf',
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.',
            candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
            },
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science']
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
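# A minimal usage sketch of the zero-shot pipeline exercised by the tests above
# (assumes the `transformers` library is installed; model name and inputs are
# illustrative, mirroring the test case rather than prescribing anything beyond it):
#
#     from transformers import pipeline
#
#     classifier = pipeline('zero-shot-classification', model='roberta-large-mnli')
#     result = classifier('Who are you voting for in 2020?',
#                         candidate_labels=['politics', 'public health', 'science'])
#     print(result['labels'][0], result['scores'][0])  # best label first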
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
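# With the lazy-module pattern above, the heavy torch-backed submodule is only
# imported when one of its attributes is first accessed (illustrative):
#
#     from transformers.models.xlm_roberta_xl import XLMRobertaXLConfig  # cheap
#     from transformers.models.xlm_roberta_xl import XLMRobertaXLModel   # triggers the torch import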
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
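# Hypothetical usage sketch (assumes a `transformers` build with TVLT support;
# `video_frames` and `waveform` stand in for real inputs):
#
#     from transformers import TvltImageProcessor, TvltFeatureExtractor
#
#     processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `batch` holds both pixel and audio features, keyed by the model input names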
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
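# Typical invocation (illustrative; the script path assumes it lives under the
# repository's utils/ directory next to docs/source/en/_toctree.yml):
#
#     python utils/check_doc_toc.py                      # check only, raise on bad order
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the toctree in place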
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Unconditional image generation pipeline using DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
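# Hedged usage sketch (assumes `diffusers` is installed; the checkpoint name is
# illustrative):
#
#     from diffusers import DDIMPipeline
#
#     pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save('sample.png')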
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the downloaded bytes decode as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
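# Example invocation (illustrative; requires the `clip-retrieval` package and
# network access to the LAION knn service; the script filename is assumed):
#
#     python retrieve.py --class_prompt "photo of a cat" \
#         --class_data_dir ./real_reg/cat --num_class_images 200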
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length, remainder, digits, length):
    """Count the reversible numbers of the given length with the digits chosen so far."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
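# Sanity check: Project Euler 145 states there are exactly 120 reversible
# numbers below one-thousand, so with this implementation one would expect:
#
#     >>> solution(3)
#     120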
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
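# Example launch (illustrative hyper-parameters, single GPU):
#
#     python run_mae.py \
#         --dataset_name cifar10 \
#         --output_dir ./vit-mae-demo \
#         --do_train --do_eval \
#         --base_learning_rate 1.5e-4 \
#         --mask_ratio 0.75 \
#         --per_device_train_batch_size 8 \
#         --num_train_epochs 1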
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
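# Hypothetical usage (assumes `transformers` and `tokenizers` are installed):
#
#     from transformers import SqueezeBertTokenizerFast
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained('squeezebert/squeezebert-uncased')
#     enc = tokenizer('Hello world', 'second segment')
#     print(enc['input_ids'])
#     print(enc['token_type_ids'])  # 0s for the first segment, 1s for the second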
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features into the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features` by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
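# Hedged usage sketch (assumes a `diffusers` install with the spectrogram
# diffusion extras and a MIDI tokenizer producing `input_tokens`; the checkpoint
# name is illustrative):
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained('google/music-spectrogram-diffusion')
#     audio = pipe(input_tokens, num_inference_steps=100).audios[0]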
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network output approaches `expected`."""
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
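# Example session (illustrative; the upstream version of this snippet asserts in
# a doctest that forward_propagation(32, 450_000) lands between 31 and 33):
#
#     Expected value: 32
#     Number of propagations: 450000
#     31.98...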
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'{solution() = }')
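# Worked examples (Project Euler 2): the even Fibonacci numbers not exceeding 10
# are 2 and 8, and not exceeding 34 are 2, 8 and 34, so:
#
#     >>> solution(10)
#     10
#     >>> solution(34)
#     44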
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
'''simple docstring'''
def valid_coloring(neighbours, colored_vertices, color):
    """Check that no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
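# Small runnable demo: 3-color a 4-vertex graph given as an adjacency matrix.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    # One valid answer is [0, 1, 2, 1]: adjacent vertices get different colors.
    print(color(demo_graph, 3))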
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
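# Hypothetical usage (assumes `transformers` is installed):
#
#     from transformers import DetrConfig, DetrModel
#
#     config = DetrConfig(num_queries=50)  # randomly initialized DETR with 50 object queries
#     model = DetrModel(config)
#     print(config.hidden_size)            # 256, aliased to d_model via attribute_map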
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    """Sum all the primes below `n` using a sieve of Eratosthenes."""
    # 0 marks a number still presumed prime, 1 marks a known composite
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
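# Worked example: the primes below 10 are 2, 3, 5 and 7, so
#
#     >>> solution(10)
#     17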
class DisjointSet:
    """Disjoint set with union by rank that also tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
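# Small runnable demo of union by rank with set-size tracking.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])  # three singleton sets
    ds.merge(0, 1)  # ranks tie, so 1 becomes the root and its rank grows
    ds.merge(1, 2)  # root 1 has the higher rank, so it absorbs set 2
    print(ds.max_set)  # 3: all elements now live in one set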
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the last bit and shifting right."""
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = """import __main__ as z"""
        print(F"Benchmark when {number = }:")
        print(F"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("""z.get_set_bits_count_using_modulo_operator(25)""", setup=setup)
        print(F"timeit() runs in {timing} seconds")
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""", setup=setup,
        )
        print(F"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
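# Added note (not in the original): Brian Kernighan's method loops once per
# *set* bit, while the modulo/shift method loops once per bit of the number,
# which is why the former wins on sparse bit patterns, e.g.:
#   get_set_bits_count_using_brian_kernighans_algorithm(2**30)  # 1 iteration
#   get_set_bits_count_using_modulo_operator(2**30)             # 31 iterations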
| 321 | 0 |
"""simple docstring"""
import random
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> tuple:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = [], [], []
for element in data:
if element < pivot:
less.append(__UpperCamelCase )
elif element > pivot:
greater.append(__UpperCamelCase )
else:
equal.append(__UpperCamelCase )
return less, equal, greater
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(__UpperCamelCase ) or index < 0:
return None
SCREAMING_SNAKE_CASE__ : Optional[Any] = items[random.randint(0 , len(__UpperCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _partition(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : str = len(__UpperCamelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__UpperCamelCase , __UpperCamelCase )
# must be in larger
else:
return quick_select(__UpperCamelCase , index - (m + count) )
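# Added usage example (not in the original): the median of an odd-length list
# is the element at index len(items) // 2.
if __name__ == "__main__":
    items = [2, 7, 1, 8, 9]
    assert quick_select(items, len(items) // 2) == 7  # sorted: [1, 2, 7, 8, 9]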
| 132 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 321 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
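# Added note (not in the original): this module-level pattern is how
# transformers defers heavy imports. At runtime the module object is replaced
# by a _LazyModule, so e.g. `from transformers.models.clap import ClapModel`
# only triggers the torch-backed import on first attribute access, while under
# TYPE_CHECKING the real imports run so static analyzers still see the names.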
| 182 |
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """
    Malus's law: the intensity transmitted through a polarizer is
    I = I0 * cos^2(theta), where theta is the angle between the light's
    polarization direction and the polarizer axis.
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
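# Added sanity check (not in the original): at a 60 degree angle the
# transmitted intensity is cos^2(60°) = 1/4 of the initial intensity.
assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9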
| 321 | 0 |
"""Convert MobileViTV2 checkpoints from the original repository."""

import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
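# Added usage note (not in the original; the file paths are placeholders):
# the script is driven from the command line, e.g.
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./dump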
| 208 |
import datasets

from .evaluate import evaluate


_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'

_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'

_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 321 | 0 |
"""
Project Euler Problem 551: https://projecteuler.net/problem=551

Sum of digits sequence: a(0) = 1 and, for n >= 1,
a(n) = a(n - 1) + (the sum of the digits of a(n - 1)). Find a(10**15).
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10**k, when terms are written in the form
    a(i) = b * 10**k + c. Returns the difference between terms and the
    number of terms jumped.
    """
    # ds_b - digit sum of b; c - the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same as next_term(a_i, k, i, n) but computes terms sequentially without
    memoizing intermediate results.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """
    Adds addend to the digit array `digits`, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
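# Added sanity check (not in the original): the sequence starts
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ..., so the 10th term is 62.
assert solution(10) == 62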
| 161 |
"""
Project Euler Problem 129: https://projecteuler.net/problem=129

A(n) is the least value k for which the repunit R(k) = (10**k - 1) / 9 is
divisible by n. Find the least n for which A(n) first exceeds one million.
"""


def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
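# Added sanity check (not in the original): R(6) = 111111 = 7 x 15873, and no
# shorter repunit is divisible by 7, so A(7) = 6.
assert least_divisible_repunit(7) == 6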
| 321 | 0 |
"""Testing suite for the PyTorch TimeSformer model."""

import copy
import inspect
import unittest

import numpy as np

from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 97 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Union[str, bool] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
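# Added usage sketch (not in the original): a config for a univariate series
# with a 24-step horizon; unset values fall back to the defaults above.
# config = TimeSeriesTransformerConfig(prediction_length=24)
# assert config.context_length == 24  # context_length defaults to prediction_length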
| 287 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """
    Sort the given list in place using pigeonhole sort.
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
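# Added note (not in the original): pigeonhole sort runs in O(n + range) time
# and O(range) extra space, so it only pays off when the value range
# (max - min) is comparable to the number of elements being sorted.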
| 321 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False ) -> Dict:
lowercase__ : List[Any] = '''backbone.''' if is_semantic else ''''''
lowercase__ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
lowercase__ : str = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
lowercase__ : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowercase__ : List[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowercase__ : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowercase__ : Tuple = in_proj_weight[
: config.hidden_size, :
]
lowercase__ : Union[str, Any] = q_bias
lowercase__ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowercase__ : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowercase__ : Union[str, Any] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowercase__ : int = gamma_a
lowercase__ : Tuple = gamma_a
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Union[str, Any] = dct.pop(__UpperCamelCase )
lowercase__ : Optional[int] = val
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : Any = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = False if '''rvlcdip''' in checkpoint_url else True
lowercase__ : List[Any] = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase__ : int = 10_24
lowercase__ : Union[str, Any] = 40_96
lowercase__ : Dict = 24
lowercase__ : List[str] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase__ : Tuple = 16
lowercase__ : Optional[int] = '''huggingface/label-files'''
lowercase__ : Optional[int] = '''rvlcdip-id2label.json'''
lowercase__ : Optional[int] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Tuple = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Union[str, Any] = idalabel
lowercase__ : Tuple = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowercase__ : Optional[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' )['''model''']
lowercase__ : Optional[Any] = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
# load HuggingFace model
lowercase__ : Tuple = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image
lowercase__ : Any = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
lowercase__ : int = prepare_img()
lowercase__ : str = image_processor(images=__UpperCamelCase , return_tensors='''pt''' )
lowercase__ : int = encoding['''pixel_values''']
lowercase__ : Tuple = model(__UpperCamelCase )
lowercase__ : str = outputs.logits
# verify logits
lowercase__ : List[Any] = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
if has_lm_head:
lowercase__ : List[str] = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
lowercase__ : Optional[Any] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__UpperCamelCase , )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
lowerCAmelCase_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
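# Added usage note (not in the original; the URL is the script's own default):
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base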
| 16 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( lowerCamelCase ):
lowercase = (DDPMParallelScheduler,)
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def A__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = self.dummy_sample_deter + 0.1
UpperCamelCase = self.dummy_sample_deter - 0.1
UpperCamelCase = samplea.shape[0]
UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
    def test_custom_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
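    # Illustrative sketch (not part of the original suite): the custom-timesteps
    # API exercised above is typically driven like this. `DDPMScheduler` is an
    # assumed concrete scheduler class, not necessarily the one under test.
    # from diffusers import DDPMScheduler
    # scheduler = DDPMScheduler()
    # scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # strictly descending
    # print(scheduler.timesteps)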
| 321 | 0 |
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    # Serialize prints across ranks by flock-ing this script's own file.
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
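# Usage sketch: launch one process per GPU with the standard torch launcher
# (the file name below is an assumption):
#   python -m torch.distributed.run --nproc_per_node 2 torch_dist_gpu_test.py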
| 61 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation over 1-indexed, inclusive ranges."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
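    # Sanity sketch: a freshly built tree must report max(A) over the full range.
    check = SegmentTree(size)
    check.build(1, 1, size, A)
    assert check.query(1, 1, size, 1, size) == max(A)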
| 321 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """Computes a SHA-256 message digest, following the FIPS 180-4 pseudocode."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a 64-byte boundary and append the message bit length (big-endian)."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
                mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string or the contents of a file from the command line."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
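    # Cross-check sketch against the standard library on a known test vector:
    # import hashlib
    # assert SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()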
| 47 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c of a Pythagorean triplet with a+b+c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
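    # Known-answer sketch: for n=1000 the triplet is (200, 375, 425),
    # so solution() evaluates to 200 * 375 * 425 == 31875000.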
| 321 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a : Union[str, Any]= "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a : Optional[int]= [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path: str, strict: bool, opset: int) -> None:
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
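# Usage sketch (the script name is an assumption):
#   python check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict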
| 172 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """Computes a SHA-256 message digest, following the FIPS 180-4 pseudocode."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a 64-byte boundary and append the message bit length (big-endian)."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
                mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 321 | 0 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 235 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
SCREAMING_SNAKE_CASE__ = 4_5
SCREAMING_SNAKE_CASE__ = 1_5_8_1
SCREAMING_SNAKE_CASE__ = 1_5_1_7
SCREAMING_SNAKE_CASE__ = 1_5_7_0
SCREAMING_SNAKE_CASE__ = 1_5_8_4
SCREAMING_SNAKE_CASE__ = 1_7_9_3
SCREAMING_SNAKE_CASE__ = 1_7_9_5
SCREAMING_SNAKE_CASE__ = 1_9_1_6
SCREAMING_SNAKE_CASE__ = 1_8_6_4
SCREAMING_SNAKE_CASE__ = 1_9_0_5
SCREAMING_SNAKE_CASE__ = 1_9_1_9
SCREAMING_SNAKE_CASE__ = 2_4_2_9
SCREAMING_SNAKE_CASE__ = 2_2_0_8
SCREAMING_SNAKE_CASE__ = 2_4_1_8
SCREAMING_SNAKE_CASE__ = 2_3_2_3
SCREAMING_SNAKE_CASE__ = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 321 | 0 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self) -> None:
        super().setUp()

    def get_tokenizer(self, **kwargs) -> RoFormerTokenizer:
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> RoFormerTokenizerFast:
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self) -> tuple:
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self) -> None:
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self) -> None:
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # Overridden mixin tests that do not apply to this tokenizer are skipped.
    def test_training_new_tokenizer(self) -> None:
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self) -> None:
        pass

    def test_save_slow_from_fast_and_reload_fast(self) -> None:
        pass
| 132 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
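    # Worked example sketch (ideal gas law, PV = nRT):
    # pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5 == 332.57848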
| 321 | 0 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
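    # Round-trip sketch for the Baconian cipher above:
    # encode("hello") == "AABBBAABAAABABAABABAABBAB", and decoding it returns "hello".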
| 182 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear reusable event-loop state so fsspec can be used again after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
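# Usage sketch:
#   extract_path_from_uri("s3://my-bucket/datasets/train")  ->  "my-bucket/datasets/train"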
| 321 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor with a language tokenizer and a Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
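# Usage sketch (the checkpoint name is an assumption):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")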
| 208 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 321 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MaMaaaTokenizer:
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer) -> tuple:
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self) -> None:
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self) -> None:
        pass
    def test_full_tokenizer(self) -> None:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
__A = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self) -> None:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
    def test_tokenizer_batch_encode_plus(self) -> None:
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self) -> None:
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self) -> None:
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
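# Translation usage sketch with the checkpoint above:
# tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model_inputs = tok("Hello", return_tensors="pt")  # input_ids are prefixed with the en language code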
| 161 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml'
def lowercase__ ( __UpperCamelCase )-> Optional[Any]:
UpperCamelCase = defaultdict(__UpperCamelCase )
UpperCamelCase = []
UpperCamelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__UpperCamelCase )
UpperCamelCase = new_doc_list
UpperCamelCase = [key for key, value in counts.items() if value > 1]
UpperCamelCase = []
for duplicate_key in duplicates:
UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__UpperCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__UpperCamelCase )
# Sort
return overview_doc
def lowercase__ ( __UpperCamelCase=False )-> List[str]:
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
UpperCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase = content[api_idx]["""sections"""]
# Then to the model doc
UpperCamelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCamelCase = api_doc[scheduler_idx]["""sections"""]
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
UpperCamelCase = False
if new_scheduler_doc != scheduler_doc:
UpperCamelCase = True
if overwrite:
UpperCamelCase = new_scheduler_doc
if diff:
if overwrite:
UpperCamelCase = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def lowercase__ ( __UpperCamelCase=False )-> Tuple:
with open(__UpperCamelCase , encoding="""utf-8""" ) as f:
UpperCamelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase = content[api_idx]["""sections"""]
# Then to the model doc
UpperCamelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCamelCase = False
UpperCamelCase = api_doc[pipeline_idx]["""sections"""]
UpperCamelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCamelCase = pipeline_doc["""section"""]
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
if overwrite:
UpperCamelCase = new_sub_pipeline_doc
new_pipeline_docs.append(__UpperCamelCase )
# sort overall pipeline doc
UpperCamelCase = clean_doc_toc(__UpperCamelCase )
if new_pipeline_docs != pipeline_docs:
UpperCamelCase = True
if overwrite:
UpperCamelCase = new_pipeline_docs
if diff:
if overwrite:
UpperCamelCase = api_doc
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 321 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs and inputs for the model tests below."""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) | 97 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCamelCase = 1.5
UpperCamelCase = int(factor * num_class_images )
UpperCamelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase )
if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
UpperCamelCase = client.query(text=__UpperCamelCase )
if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
UpperCamelCase = int(factor * num_images )
UpperCamelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase )
with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
F"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
UpperCamelCase = class_images[count]
count += 1
try:
UpperCamelCase = requests.get(images["""url"""] )
if img.status_code == 200:
UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase__ ( )-> str:
UpperCamelCase = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 321 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
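
# Illustrative check (follows directly from the code-point ranges above):
#   _is_chinese_char(ord("中"))  # True  -- U+4E2D lies in the CJK Unified Ideographs block
#   _is_chinese_char(ord("A"))   # False -- Latin letters fall outside every listed range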
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
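
# Illustrative (hypothetical tokenization): if a line tokenizes to
# ['[CLS]', '仓', '##库', '[SEP]'] and LTP segments it as the single word '仓库',
# the ref ids for that line are [2] -- the position of the '##' continuation token.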
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 287 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train-time transforms to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
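
    # Illustrative only (not executed by the script): the pipeline above turns any PIL
    # image into a normalized tensor of shape (3, size, size), e.g.
    #
    #   from PIL import Image
    #   import numpy as np
    #   dummy = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
    #   batch = preprocess_images({image_column_name: [dummy]})
    #   batch["pixel_values"][0].shape  # torch.Size([3, size, size])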
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
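
    # Worked example (hypothetical values): with base_learning_rate=1e-3, per-device
    # batch size 64, gradient accumulation 2 and world size 4,
    # total_train_batch_size = 64 * 2 * 4 = 512 and learning_rate = 1e-3 * 512 / 256 = 2e-3.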
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 321 | 0 |
"""simple docstring"""
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of every pixel of a PIL image by `level`."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
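
# Worked example: with level = 100 the mapping above sends a mid-gray pixel c = 128
# to 128 + 100 + 0 = 228 and a black pixel c = 0 to 128 + 100 - 128 = 100.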
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 16 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class a_ ( lowerCamelCase ):
lowercase = ["""melgan"""]
    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
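
    # Worked example: with self.min_value = log(1e-5) ≈ -11.51 and self.max_value = 4.0,
    # a feature equal to min_value maps to min_out (-1.0 by default), a feature equal to
    # max_value maps to max_out (1.0), and everything in between is interpolated linearly.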
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
| 321 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 61 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
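
# e.g. solution(100) keeps the even Fibonacci terms 2, 8 and 34 and returns 44.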
if __name__ == "__main__":
print(f'{solution() = }')
| 321 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =[1, 2]
_SCREAMING_SNAKE_CASE ={'a': 1, 'b': 2}
_SCREAMING_SNAKE_CASE ={'a': [1, 2], 'b': [3, 4]}
_SCREAMING_SNAKE_CASE ={'a': {'1': 1}, 'b': 2}
_SCREAMING_SNAKE_CASE ={'a': 1, 'b': 2, 'c': 3, 'd': 4}
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =[2, 3]
_SCREAMING_SNAKE_CASE ={'a': 2, 'b': 3}
_SCREAMING_SNAKE_CASE ={'a': [2, 3], 'b': [4, 5]}
_SCREAMING_SNAKE_CASE ={'a': {'1': 2}, 'b': 3}
_SCREAMING_SNAKE_CASE ={'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE =2
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE ={'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
_SCREAMING_SNAKE_CASE ={'a': 2, 'b': 0, 'c': 2}
_SCREAMING_SNAKE_CASE ={
'a': np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ),
'b': np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ),
'c': np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
            map_nested(lambda x: x + 1, _SCREAMING_SNAKE_CASE, num_proc=_SCREAMING_SNAKE_CASE)
    def test_zip_dict(self):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={'a': 1, 'b': 2}
_SCREAMING_SNAKE_CASE ={'a': 3, 'b': 4}
_SCREAMING_SNAKE_CASE ={'a': 5, 'b': 6}
_SCREAMING_SNAKE_CASE =sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
    def test_temporary_assignment(self):
'''simple docstring'''
        class Foo:
            my_attr = "bar"

        foo = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(_SCREAMING_SNAKE_CASE , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
@require_tf
    def test_tensorflow(self):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@require_torch
    def test_torch(self):
'''simple docstring'''
import torch
        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
'''simple docstring'''
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 47 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A vertex may take `color` only if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
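
# Illustrative run on a triangle graph (adjacency-matrix form):
#   graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(graph, 3)  # -> [0, 1, 2], a proper 3-coloring
#   color(graph, 2)  # -> [],       a triangle is not 2-colorable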
| 321 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown as 0),
    solve |Z|**2 = R**2 + X**2 for the missing quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
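
    # Added worked example: with a 3 ohm resistance and a 4 ohm reactance the
    # missing impedance completes the 3-4-5 triple.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}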
| 172 |
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    # sieve of Eratosthenes: 0 marks a prime candidate, 1 marks a composite
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
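
    # Added sanity check: the primes below 10 are 2, 3, 5 and 7, so a small
    # input is a cheap way to validate the sieve.
    assert solution(10) == 17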
| 321 | 0 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """simple docstring"""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    """simple docstring"""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 235 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # clearing the lowest set bit counts one bit per iteration
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
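
    # Added sanity check: 25 is 0b11001, so both counters must report 3 set bits.
    assert get_set_bits_count_using_modulo_operator(25) == 3
    assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3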
| 321 | 0 |
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
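
    # Added toy example: in this 4x4 grid the best run of four adjacent
    # numbers is the bottom row, 5 * 6 * 7 * 8 = 1680.
    demo_grid = [
        [1, 1, 1, 1],
        [1, 1, 1, 1],
        [1, 1, 1, 1],
        [5, 6, 7, 8],
    ]
    assert largest_product(demo_grid) == 1680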
| 132 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        '''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')

        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]], dtype=tf.int32),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 182 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
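
    # Added worked example: cos(60 degrees) = 0.5, so a polarizer rotated by
    # 60 degrees transmits cos^2(60) = 25 % of the incident intensity.
    print(malus_law(100, 60))  # ~25.0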
| 321 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_UpperCamelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'),
            os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput',
            'DDPMSchedulerOutput',
            REFERENCE_CODE + '\n',
        )

        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput',
            'DDPMSchedulerOutput',
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test',
            'TestSchedulerOutput',
            re.sub('DDPM', 'Test', REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}',
            f'{long_class_name}SchedulerOutput',
            re.sub('Bert', long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test',
            'TestSchedulerOutput',
            REFERENCE_CODE,
            overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE),
        )
| 208 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 321 | 0 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """simple docstring"""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """simple docstring"""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
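
    # Added toy example: dilating a single white pixel with the cross-shaped
    # structuring element above grows it into a plus sign.
    tiny = np.zeros((3, 3))
    tiny[1, 1] = 1
    print(dilation(tiny, structuring_element))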
| 161 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
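
    # Added sanity check: 111111 = 7 * 15873 and no shorter repunit is
    # divisible by 7, so the smallest qualifying repunit for 7 has 6 digits.
    assert least_divisible_repunit(7) == 6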
| 321 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    '''simple docstring'''
    return getitem, k


def _set(k, v):
    '''simple docstring'''
    return setitem, k, v


def _del(k):
    '''simple docstring'''
    return delitem, k


def _run_operation(obj, fun, *args):
    '''simple docstring'''
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
_overwrite_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
_delete_items = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
_access_absent_items = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    '''simple docstring'''
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    '''simple docstring'''
    def is_public(name) -> bool:
        return not name.startswith('''_''')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names

| 97 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 287 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""", """ """.join([str(x) for x in a]))
if __name__ == "__main__":
main()
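
    # Added example with negative values: the holes are offset by the minimum,
    # so mixed-sign integers sort correctly as well.
    data = [3, -2, 0, -2, 5]
    pigeonhole_sort(data)
    assert data == [-2, -2, 0, 3, 5]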
| 321 | 0 |
"""simple docstring"""
class Node:
    '''simple docstring'''

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    '''simple docstring'''

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }

        config.update(**kwargs)
        return config
def A__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = self.dummy_sample_deter + 0.1
UpperCamelCase = self.dummy_sample_deter - 0.1
UpperCamelCase = samplea.shape[0]
UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""",
        ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
| 321 | 0 |
"""simple docstring"""
def factorial(num):
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num=100):
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
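
    # Added sanity check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27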
| 61 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        # push any pending lazy value down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        # push any pending lazy value down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
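
    # Added sanity check: after the two range assignments above the maxima are
    # 111 on [1, 3] and 235 on [7, 8], so the global maximum is 235.
    print(segt.query(1, 1, size, 1, size))  # 235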
| 321 | 0 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')

    result = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
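
    # Added sanity check: both generators must agree; the first five rows are
    # 1 / 1 1 / 1 2 1 / 1 3 3 1 / 1 4 6 4 1.
    assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)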
| 47 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
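
    # Added sanity check: for a perimeter of 12 the only Pythagorean triplet
    # is (3, 4, 5), so the product is 3 * 4 * 5 = 60.
    assert solution(12) == 60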
| 321 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")

    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
                'your training function. Restart your notebook and make sure no cells initializes an '
                '`Accelerator`.')
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(F"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print('Launching training on one GPU.')
        else:
            print('Launching training on one CPU.')
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.')
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
                    'inside your training function. Restart your notebook and make sure no cells initializes an '
                    '`Accelerator`.')
            if torch.cuda.is_initialized():
                raise ValueError(
                    'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
                    'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
                    'function.')

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='127.0.01', master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')

                print(F"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
                            'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
                            'Please review your imports and test them when running the `notebook_launcher()` to identify '
                            'which one is problematic.') from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print('Launching training on MPS.')
            elif torch.cuda.is_available():
                print('Launching training on one GPU.')
            else:
                print('Launching training on CPU.')
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr='127.0.01',
            master_port='29500',
            accelerate_mixed_precision='no',
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu='yes',
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
| 172 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        """simple docstring"""
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        """simple docstring"""
        padding = B"""\x80""" + (B"""\x00""" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""", (len(data) * 8))
        return data + padding + big_endian_integer
def A__ ( self ) -> None:
"""simple docstring"""
UpperCamelCase = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase = list(struct.unpack(""">16L""" , _SCREAMING_SNAKE_CASE ) )
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        """The digest must match the one produced by hashlib."""
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
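# Example (added): the FIPS 180-4 test vector for the three-byte message b"abc":
#
#     >>> SHA256(b"abc").hash
#     'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'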
| 321 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker in the odd-even transposition sort; it holds a single value."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` by spawning one process per element."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
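# Note (added for clarity): odd-even transposition sort runs n rounds of
# compare-exchange between alternating neighbor pairs; with one process per element
# it sorts in O(n) parallel time. A tiny single-process sketch of the same idea:
#
#     def odd_even_transposition_sequential(arr):
#         arr = list(arr)
#         for phase in range(len(arr)):
#             for i in range(phase % 2, len(arr) - 1, 2):
#                 if arr[i] > arr[i + 1]:
#                     arr[i], arr[i + 1] = arr[i + 1], arr[i]
#         return arr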
| 235 |
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: sentencepiece_model.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    '\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 321 | 0 |
"""Convert a Hugging Face PyTorch BERT checkpoint to the original TensorFlow 1.x checkpoint format."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
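# Example invocation (added; the script name and paths are illustrative placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint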
| 132 |
"""Ideal gas law: PV = nRT, solved for pressure or volume."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure (Pa) of ``moles`` of an ideal gas at ``kelvin`` K in ``volume`` m^3."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume (m^3) of ``moles`` of an ideal gas at ``kelvin`` K under ``pressure`` Pa."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
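# Worked example (added): one mole of an ideal gas at 273.15 K in 0.0224 m^3 exerts
# roughly one atmosphere, since P = nRT / V:
#
#     >>> round(pressure_of_gas_system(1, 273.15, 0.0224))
#     101388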
| 321 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 182 |
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if ``fs`` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename a path, using a plain move for local filesystems."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's event loop and thread (needed after fork)."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
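# Quick illustration (added; not part of the module): extract_path_from_uri drops the
# protocol prefix so the remaining path can be handed to the filesystem object:
#
#     >>> extract_path_from_uri("s3://my-bucket/datasets/train")
#     'my-bucket/datasets/train'
#     >>> extract_path_from_uri("relative/path")
#     'relative/path'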
| 321 | 0 |
"""Simplified DES (S-DES): an 8-bit educational variant of the DES block cipher."""


def apply_table(inp, table):
    """Apply a permutation/selection table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box ``s``: row from the outer bits, column from the inner bits."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
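# Sanity-check sketch (added for illustration): since S-DES is a Feistel network,
# decrypting with the two subkeys applied in reverse order must return the message.
#
#     def round_trip(message, ip, ip_inv, expansion, s0, s1, key1, key2):
#         temp = apply_table(message, ip)
#         temp = function(expansion, s0, s1, key1, temp)
#         temp = temp[4:] + temp[:4]
#         ct = apply_table(function(expansion, s0, s1, key2, temp), ip_inv)
#         temp = apply_table(ct, ip)
#         temp = function(expansion, s0, s1, key2, temp)
#         temp = temp[4:] + temp[:4]
#         pt = apply_table(function(expansion, s0, s1, key1, temp), ip_inv)
#         return pt == message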
| 208 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 321 | 0 |
"""Testing suite for the PyTorch MobileViTV2 model."""

import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
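# Note (added): make_divisible follows the MobileNet convention of rounding a channel
# count to a multiple of `divisor` without dropping below roughly 90% of the input,
# so the tester's last_hidden_size is make_divisible(512 * 0.25, divisor=8) == 128.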
| 161 |
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans one section of the table of content of the documentation by removing
    duplicates and sorting entries alphabetically (with "Overview" always first).
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
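# Example of the YAML shape this script expects (illustrative, not from the repo):
#
#   - title: API
#     sections:
#       - title: Schedulers
#         sections:
#           - local: api/schedulers/overview
#             title: Overview
#           - local: api/schedulers/ddim
#             title: DDIM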
| 321 | 0 |
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 97 |
"""Retrieve real regularization images via CLIP retrieval (used for Custom Diffusion)."""

import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
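# Example invocation (added; the script name and values are illustrative placeholders):
#
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./real_reg/cat --num_class_images 200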
| 321 | 0 |
def decimal_to_binary(num):
    """Convert an integer to its binary string, prefixed with ``0b`` (or ``-0b``)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
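# Examples (added for illustration):
#
#     >>> decimal_to_binary(0)
#     '0b0'
#     >>> decimal_to_binary(5)
#     '0b101'
#     >>> decimal_to_binary(-5)
#     '-0b101'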
| 287 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class a_ :
lowercase = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowercase = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class a_ :
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowercase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class a_ ( lowerCamelCase ):
lowercase = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowercase__ ( __UpperCamelCase )-> int:
UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowercase__ ( )-> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
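# --- added usage sketch (not part of the original script): what the collate
# function above produces for a small hand-made batch. The shapes and the helper
# name `_demo_collate_fn` are illustrative assumptions, not from the script.
def _demo_collate_fn():
    import torch

    examples = [{"pixel_values": torch.rand(3 , 2_2_4 , 2_2_4 )} for _ in range(4 )]
    batch = collate_fn(examples )
    assert batch["pixel_values"].shape == (4, 3, 2_2_4, 2_2_4)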
| 321 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_1_2,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool( string : str ) -> bool:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
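# --- added usage sketch (not part of the original script): the string-to-bool
# converter registered for --use_linear_projection above. `parse_bool` is only
# defined when the script runs as __main__; the helper name is illustrative.
def _demo_parse_bool():
    assert parse_bool("True" ) is True
    assert parse_bool("False" ) is False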
| 322 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 2_5),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf' ),
            'variance_type': None,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        # only build a default scheduler when none was passed in; the previous
        # version rebuilt it unconditionally and silently ignored the argument
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_full_uneven_loop( self ):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 5_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2574 ) < 1E-3
    def test_timesteps( self ):
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_switch( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='dpmsolver++' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_lambda_min_clipped( self ):
        self.check_over_configs(lambda_min_clipped=-float('inf' ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type( self ):
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type='learned_range' )
    def test_inference_steps( self ):
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_full_loop_with_karras( self ):
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2248 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1453 ) < 1E-3
    def test_full_loop_with_karras_and_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0649 ) < 1E-3
    def test_fp16_support( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
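# --- added usage sketch (not part of the original tests): the save/load round
# trip the tests above exercise, shown outside the test harness. The helper name
# and the solver_order value are illustrative.
def _demo_config_roundtrip():
    scheduler = DPMSolverSinglestepScheduler(solver_order=2 )
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname )
        reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname )
    assert reloaded.config.solver_order == 2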
| 322 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig( PretrainedConfig ):
    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_0_2_6_7 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=1_0_0 , prompt_mid_dim=8_0_0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.' )
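# --- added usage sketch (not part of the original module): instantiating the
# config above; the smaller layer counts and prompt length are illustrative.
def _demo_mvp_config():
    config = MvpConfig(encoder_layers=6 , decoder_layers=6 , use_prompt=True , prompt_length=5_0 )
    assert config.use_prompt and config.prompt_length == 5_0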
| 322 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t ):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar(value , total , prefix , label , width=3_00 ):
    "HTML code for a progress bar `value`/`total` with a `prefix` and a `label` on the right."
    return f'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items ):
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f'''    <th>{i}</th>\n'''
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
            html_code += f'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
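# --- added sanity checks (not part of the original module): the expected
# strings follow directly from the formatting logic of `format_time` above.
assert format_time(36_61 ) == '1:01:01'
assert format_time(75 ) == '01:15'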
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2
    def __init__( self , total : int , prefix : Optional[str] = None , leave : bool = True , parent : Optional["NotebookTrainingTracker"] = None , width : int = 3_0_0 , ):
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value : int , force_update : bool = False , comment : str = None ):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ):
        spaced_value = ' ' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
        else:
            self.label = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
                F''' {format_time(self.predicted_remaining )}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
        self.display()
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML('' ) )
class NotebookTrainingTracker( NotebookProgressBar ):
    def __init__( self , num_steps , column_names=None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=3_0_0 ):
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ):
        self.child_bar = None
        self.display()
class NotebookProgressCallback( TrainerCallback ):
    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ):
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch )
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss' ):
                    metric_key_prefix = re.sub(R'\_loss$' , '' , k )
            _ = metrics.pop('total_flos' , None )
            _ = metrics.pop('epoch' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_runtime''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , None )
            _ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , None )
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_' )
                    name = ' '.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=True )
        self.training_tracker = None
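# --- added usage sketch (not part of the original module): driving the plain
# progress bar by hand. The HTML only renders inside a Jupyter session, where
# IPython display is available; the helper name and step count are illustrative.
def _demo_progress_bar():
    bar = NotebookProgressBar(1_0_0 )
    for step in range(1 , 1_0_1 ):
        bar.update(step , comment=f'''step {step}''' )
    bar.close()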
| 322 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_a = logging.get_logger(__name__)
class MCTCTFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['input_features', 'attention_mask']
    def __init__( self , feature_size=8_0 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , hop_length=1_0 , win_length=2_5 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_0_0_0
        self.sample_stride = hop_length * sampling_rate // 1_0_0_0
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform : np.array ) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel='log' , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features : List[np.ndarray] , attention_mask : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , list ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
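# --- added usage sketch (not part of the original module): running the extractor
# above on one second of silence. The helper name and the printed shape comment
# are illustrative.
def _demo_feature_extraction():
    extractor = MCTCTFeatureExtractor()
    inputs = extractor(np.zeros(1_6_0_0_0 , dtype=np.float32 ) , sampling_rate=1_6_0_0_0 , return_tensors='np' )
    print(inputs['input_features'][0].shape )  # roughly (num_frames, 80)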
| 322 |
import os
from datetime import datetime as dt
from github import Github
_a = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
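# --- added usage sketch (not part of the original script): the staleness window
# enforced above, checked on synthetic timestamps. Dates and the helper name are
# illustrative.
def _demo_staleness_window():
    from datetime import timedelta

    now = dt.utcnow()
    days_since_updated = (now - (now - timedelta(days=8 ))).days
    days_since_creation = (now - (now - timedelta(days=4_0 ))).days
    assert days_since_updated > 7 and days_since_creation >= 30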
| 322 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_a = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer( Trainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_2_8  # default number of calibration samples
    def get_calib_dataloader( self , calib_dataset=None ):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.' )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description='Calibration' )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )
    def calibrate( self , calib_dataset=None ):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info('***** Running calibration *****' )
        logger.info(F'''  Num examples = {self.calib_num}''' )
        logger.info(F'''  Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix : str = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix : str = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
def UpperCAmelCase ( self : str , UpperCAmelCase : List[str]="./" ) -> int:
__lowerCAmelCase: Tuple = self.eval_dataset
__lowerCAmelCase: Optional[int] = self.get_eval_dataloader(UpperCAmelCase )
__lowerCAmelCase: List[str] = next(iter(UpperCAmelCase ) )
# saving device - to make it consistent
__lowerCAmelCase: str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
__lowerCAmelCase: Dict = tuple(v.to(UpperCAmelCase ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
__lowerCAmelCase: Optional[int] = True
__lowerCAmelCase: Any = self.model.to(UpperCAmelCase )
model.eval()
model.float()
__lowerCAmelCase: Optional[Any] = model.module if hasattr(UpperCAmelCase , 'module' ) else model
quant_trainer.configure_model(UpperCAmelCase , self.quant_trainer_args )
__lowerCAmelCase: str = os.path.join(UpperCAmelCase , 'model.onnx' )
logger.info(F'''exporting model to {output_model_file}''' )
__lowerCAmelCase: Any = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , export_params=UpperCAmelCase , opset_version=1_3 , do_constant_folding=UpperCAmelCase , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=UpperCAmelCase , )
logger.info('onnx export finished' )
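# --- added usage sketch (not part of the original module): the key-prefixing
# loop used in `evaluate`/`predict` above, shown on a plain dict. Metric names
# and values are illustrative.
def _demo_prefix_metrics():
    metrics = {'exact_match': 80.0, 'f1': 88.0, 'eval_loss': 0.5}
    for key in list(metrics.keys() ):
        if not key.startswith('eval_' ):
            metrics[F'''eval_{key}'''] = metrics.pop(key )
    assert set(metrics ) == {'eval_exact_match', 'eval_f1', 'eval_loss'}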
| 322 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
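# --- added usage sketch (not part of the original module): how the markers
# re-exported above are typically applied in a test file. The test body and
# helper name are illustrative.
def _demo_marker_usage():
    @slow
    @require_cuda
    def test_needs_gpu():
        pass

    return test_needs_gpu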
| 322 | 1 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'''{script_name}.py'''
    with open(script_path , 'w' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
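# --- added usage sketch (not part of the original conftest): a test consuming
# the fixture chain above; pytest injects `dataset_loading_script_dir`. The test
# name is illustrative.
def test_dataset_loading_script_dir_exists(dataset_loading_script_dir ):
    import os

    assert os.path.isdir(dataset_loading_script_dir )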
| 322 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        with tf.name_scope(self.name or 'WarmUp' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
    def get_config( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1E-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    "Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
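# --- added usage sketch (not part of the original module): building the
# optimizer/schedule pair; the step counts and learning rate are illustrative.
def _demo_create_optimizer():
    optimizer, lr_schedule = create_optimizer(
        init_lr=5E-5 , num_train_steps=1_0_0_0 , num_warmup_steps=1_0_0 , weight_decay_rate=0.01 , )
    print(float(lr_schedule(5_0 ) ) )  # mid-warmup learning rate, 2.5e-05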
class AdamWeightDecay( Adam ):
    def __init__( self , learning_rate : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , beta_1 : float = 0.9 , beta_2 : float = 0.999 , epsilon : float = 1E-7 , amsgrad : bool = False , weight_decay_rate : float = 0.0 , include_in_weight_decay : Optional[List[str]] = None , exclude_from_weight_decay : Optional[List[str]] = None , name : str = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config( cls , config ):
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate , name='adam_weight_decay_rate' )
    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads, tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across replicas/steps; read `.gradients` and call `.reset()` between updates."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
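
# A minimal usage sketch of the pieces above. Names such as `grads`, `model`, and
# `accumulation_steps` are hypothetical placeholders supplied by the caller's training loop:
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )
# accumulator = GradientAccumulator()
# ... inside a training step, after computing `grads` with tf.GradientTape:
# accumulator(grads)
# if int(accumulator.step) % accumulation_steps == 0:
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()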
| 322 | 1 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance between `start` and `target` nodes with breadth-first search."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 322 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
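
# Quick sanity checks of the geometry helpers above (values chosen only for illustration):
# add_overlap_rect((0, 0, 128, 128), 8, (512, 512))  -> (0, 0, 136, 136), clamped at the border
# next_divisible(517, 8)                             -> 512, the largest multiple of 8 <= 517
# make_transparency_mask((64, 64), 8).shape          -> (64, 64) after the linear-ramp padding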
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 322 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system Ax = b by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Given data points (1, y0), (2, y1), ..., return the interpolating polynomial as a function."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
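
# Illustrative check (not part of the original solution): interpolating the first
# four cubes recovers n**3 exactly.
# cubic = interpolate([1, 8, 27, 64])
# [cubic(n) for n in range(1, 6)]  -> [1, 8, 27, 64, 125]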
def question_function(variable: int) -> int:
    """The generating function u(n) as specified in the question."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    For each interpolating polynomial of order 1, 2, ..., `order`, find the first
    x such that the polynomial disagrees with u(x), and sum those first incorrect terms.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 322 |
def find_min(arr: list[int]) -> int:
    """Partition `arr` into two subsets whose sums are as close as possible; return the difference."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # include arr[i - 1]
    diff = s  # fallback in case no subset sum is reachable (e.g. empty input)
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
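
# Example (illustrative): splitting {1, 5, 6} vs {11} gives the minimum difference of 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1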
| 322 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
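
# `bin_exp_mod` comes from a sibling module not shown here. For reference, a minimal
# equivalent (modular exponentiation by repeated squaring) would look like this sketch:
# def bin_exp_mod(a, n, b):  # computes (a ** n) % b
#     res = 1
#     while n > 0:
#         if n % 2 == 1:
#             res = (res * a) % b
#         a = (a * a) % b
#         n //= 2
#     return res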
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random witnesses."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 322 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to `target`, or an empty list if no such pair exists.
    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 17)
    [0, 3]
    >>> two_pointer([2, 7, 11, 15], 18)
    [1, 2]
    >>> two_pointer([2, 7, 11, 15], 8)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 322 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
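
# Illustrative invocation (the script name, model name, and paths are placeholders):
# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#     --data_dir ./cnn_dm --save_path ./cnn_dm_packed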
| 322 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Creates a cosine beta schedule discretizing the given alpha-bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
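
# Illustrative check of the cosine schedule above: the betas start tiny and grow toward max_beta.
# betas = betas_for_alpha_bar(1000)
# betas.shape      -> (1000,)
# float(betas[0])  -> roughly 4e-5 for the default 1000-step schedule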
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
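
# Sketch of how the helpers combine (shapes illustrative; `scheduler` is supplied by the caller):
# state = CommonSchedulerState.create(scheduler)
# x0 = jnp.zeros((2, 4, 8, 8))              # clean latents
# eps = jnp.ones_like(x0)                   # Gaussian noise in practice
# t = jnp.array([10, 500])                  # one timestep per batch element
# x_t = add_noise_common(state, x0, eps, t) # forward-diffused samples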
| 322 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trips are already exercised by `check_over_configs`
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 322 |
| 322 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 322 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
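
# Minimal usage sketch (the checkpoint name and sample image are illustrative placeholders):
# from transformers import LayoutLMv3Processor
# from PIL import Image
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
# print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values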
| 322 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 322 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
_a = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
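# A hedged usage sketch for the exported file: the `vae_decoder/model.onnx` layout and the
# `latent_sample` input name come from the script above; the output directory is a placeholder.
import numpy as np
import onnxruntime as ort
# "sd-onnx" stands in for whatever --output_path the script was run with.
sess = ort.InferenceSession("sd-onnx/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 latent channels is typical for SD VAEs
(sample,) = sess.run(None, {"latent_sample": latents})  # single output named "sample"
print(sample.shape)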
| 322 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( snake_case__ ):
_lowercase : str = 'openai/whisper-base'
_lowercase : Union[str, Any] = (
'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
'transcribed text.'
)
_lowercase : Any = 'transcriber'
_lowercase : Dict = WhisperProcessor
_lowercase : List[Any] = WhisperForConditionalGeneration
_lowercase : List[str] = ['audio']
_lowercase : List[str] = ['text']
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[int] ) -> Dict:
return self.pre_processor(UpperCAmelCase , return_tensors='pt' ).input_features
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[str] ) -> Any:
return self.model.generate(inputs=UpperCAmelCase )
def UpperCAmelCase ( self : int , UpperCAmelCase : Union[str, Any] ) -> int:
return self.pre_processor.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )[0]
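# The three hooks above split one call chain apart; put back together with the underlying
# transformers classes, the flow looks roughly like this (a sketch, assuming a 16 kHz mono
# waveform as input):
from transformers import WhisperForConditionalGeneration, WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
def transcribe(waveform, sampling_rate=16_000):
    # encode -> forward -> decode, mirroring the three methods above
    features = processor(waveform, sampling_rate=sampling_rate, return_tensors="pt").input_features
    generated_ids = model.generate(inputs=features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]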
| 322 |
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE , col + 1 )
__lowerCAmelCase: Tuple = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase: int = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: List[str] = 1 + min([right, diagonal, down] )
__lowerCAmelCase: List[str] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
return sub_problem_sol
else:
return 0
__lowerCAmelCase: List[str] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase: List[Any] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if mat[row][col]:
__lowerCAmelCase: int = 1 + min([right, diagonal, down] )
__lowerCAmelCase: Union[str, Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase: int = [0]
__lowerCAmelCase: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE )]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE )
return largest_square_area[0]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: int = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase: Optional[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: Union[str, Any] = dp_array[row][col + 1]
__lowerCAmelCase: str = dp_array[row + 1][col + 1]
__lowerCAmelCase: Optional[int] = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase: Optional[Any] = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(dp_array[row][col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Dict = 0
return largest_square_area
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
"""simple docstring"""
__lowerCAmelCase: Tuple = [0] * (cols + 1)
__lowerCAmelCase: Optional[int] = [0] * (cols + 1)
__lowerCAmelCase: str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase: int = current_row[col + 1]
__lowerCAmelCase: Union[str, Any] = next_row[col + 1]
__lowerCAmelCase: Any = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase: str = 1 + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = max(current_row[col] , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
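# A de-obfuscated sketch of the last (O(cols)-space) variant with descriptive names, since
# all four functions above share the placeholder name `_a`:
def largest_square_area(mat):
    rows, cols = len(mat), len(mat[0])
    next_row = [0] * (cols + 1)
    best = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                # side of the largest all-ones square whose top-left corner is (row, col)
                current_row[col] = 1 + min(current_row[col + 1], next_row[col + 1], next_row[col])
                best = max(best, current_row[col])
        next_row = current_row
    return best
assert largest_square_area([[1, 1], [1, 1]]) == 2
assert largest_square_area([[0, 1], [1, 0]]) == 1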
| 322 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def _a ( ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase: Dict = argparse.ArgumentParser()
parser.add_argument('-f' )
__lowerCAmelCase: List[Any] = parser.parse_args()
return args.f
class A_ ( snake_case__ ):
def UpperCAmelCase ( self : Any ) -> None:
__lowerCAmelCase: Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int ) -> Dict:
__lowerCAmelCase: int = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , 'run_glue_deebert.py' )
with patch.object(UpperCAmelCase , 'argv' , UpperCAmelCase ):
__lowerCAmelCase: int = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCAmelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def UpperCAmelCase ( self : Dict ) -> List[Any]:
__lowerCAmelCase: List[Any] = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(UpperCAmelCase )
__lowerCAmelCase: List[Any] = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(UpperCAmelCase )
__lowerCAmelCase: Any = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(UpperCAmelCase )
| 322 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_a = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = SavedModel()
__lowerCAmelCase: str = []
with open(os.path.join(SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
__lowerCAmelCase: List[str] = json.load(SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(SCREAMING_SNAKE_CASE )] )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
__lowerCAmelCase: Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
__lowerCAmelCase: List[str] = sorted(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(SCREAMING_SNAKE_CASE )
if strict and len(SCREAMING_SNAKE_CASE ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops) )
elif len(SCREAMING_SNAKE_CASE ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
_a = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
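# A minimal sketch of just the op-enumeration step, using the unmangled TensorFlow protobuf
# module name (`saved_model_pb2`):
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
def list_ops(saved_model_path):
    sm = SavedModel()
    with open(saved_model_path, "rb") as f:
        sm.ParseFromString(f.read())
    ops = set()
    for meta_graph in sm.meta_graphs:
        ops.update(node.op for node in meta_graph.graph_def.node)  # top-level graph ops
        for func in meta_graph.graph_def.library.function:  # ops inside tf.functions
            ops.update(node.op for node in func.node_def)
    return sorted(ops)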
| 322 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__ )
class A_ ( snake_case__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
_lowercase : ClassVar[Features] = Features({'summary': Value('string' )} )
_lowercase : str = "text"
_lowercase : str = "summary"
@property
def UpperCAmelCase ( self : Tuple ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
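# In the `datasets` library this template is exposed as `Summarization`; a usage sketch with
# illustrative column names (assumes a datasets version that still ships task templates —
# they were removed in datasets 3.0):
from datasets.tasks import Summarization
template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}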
| 322 |
import math
import qiskit
def _a ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
__lowerCAmelCase: Union[str, Any] = qiskit.QuantumRegister(4 , 'qr' )
__lowerCAmelCase: List[Any] = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
__lowerCAmelCase: Any = [input_a, input_a, carry_in]
__lowerCAmelCase: List[str] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits
__lowerCAmelCase: List[str] = qiskit.Aer.get_backend('aer_simulator' )
__lowerCAmelCase: List[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
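# For definite inputs (0 or 1, i.e. no Hadamard superposition), the two measured bits should
# match a classical full adder; a quick reference:
def classical_full_adder(a, b, carry_in):
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)
# quantum_full_adder(1, 1, 1) should measure only the state with sum=1, carry=1
assert classical_full_adder(1, 1, 1) == (1, 1)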
| 322 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
_a = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
_a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def _a ( SCREAMING_SNAKE_CASE : str ) -> dict[str, int]:
"""simple docstring"""
__lowerCAmelCase: Dict = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def _a ( SCREAMING_SNAKE_CASE : tuple ) -> str:
"""simple docstring"""
return x[0]
def _a ( SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = get_letter_count(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = ''.join(freq_to_letter[freq] )
__lowerCAmelCase: Any = list(freq_to_letter_str.items() )
freq_pairs.sort(key=SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE )
__lowerCAmelCase: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
__lowerCAmelCase: Any = get_frequency_order(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
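# A self-contained sketch of the scoring rule implemented by the last function above (the
# module-level constants are mangled to `_a`, so the frequency-order string is redefined here):
ETAOIN_ORDER = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
def match_score(freq_order):
    # overlap of the six most / six least frequent letters with English
    score = sum(1 for c in ETAOIN_ORDER[:6] if c in freq_order[:6])
    score += sum(1 for c in ETAOIN_ORDER[-6:] if c in freq_order[-6:])
    return score  # 0..12, higher means more English-like
assert match_score(ETAOIN_ORDER) == 12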
| 322 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCAmelCase: str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCAmelCase: Optional[Any] = bbox[i, j, 3]
__lowerCAmelCase: Tuple = bbox[i, j, 1]
__lowerCAmelCase: Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCAmelCase: Any = bbox[i, j, 2]
__lowerCAmelCase: int = bbox[i, j, 0]
__lowerCAmelCase: int = tmp_coordinate
__lowerCAmelCase: List[Any] = tf.constant(UpperCAmelCase )
__lowerCAmelCase: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase: Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase: List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCAmelCase: int = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCAmelCase: str = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCAmelCase: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ) -> int:
__lowerCAmelCase: Tuple = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
__lowerCAmelCase: Dict = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
__lowerCAmelCase: List[str] = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCAmelCase: str = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCAmelCase: List[str] = model({'pixel_values': pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ) -> int:
__lowerCAmelCase: List[str] = self.num_labels
__lowerCAmelCase: Tuple = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
__lowerCAmelCase: Union[str, Any] = self.num_labels
__lowerCAmelCase: List[str] = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
__lowerCAmelCase: Any = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Any:
__lowerCAmelCase: str = 2
__lowerCAmelCase: Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
__lowerCAmelCase: int = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): List[str] = config_and_inputs
__lowerCAmelCase: List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( snake_case__ , snake_case__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowercase : Tuple = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Dict = False
_lowercase : Tuple = False
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]:
return True
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=False ) -> dict:
__lowerCAmelCase: Optional[Any] = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: int = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Tuple = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
__lowerCAmelCase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase: Tuple = TFLayoutLMvaModelTester(self )
__lowerCAmelCase: str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
def UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , 'hf_compute_loss' , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCAmelCase: Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
__lowerCAmelCase: Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCAmelCase: Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Tuple = prepared_for_class.pop('input_ids' )
__lowerCAmelCase: Union[str, Any] = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCAmelCase: Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[int] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__lowerCAmelCase: str = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCAmelCase: Tuple = -1_0_0
__lowerCAmelCase: Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase )
__lowerCAmelCase: Dict = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCAmelCase: str = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCAmelCase: Any = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCAmelCase: Tuple = prepared_for_class.keys() - inputs_dict.keys()
__lowerCAmelCase: Dict = inspect.signature(model.call ).parameters
__lowerCAmelCase: Dict = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCAmelCase: str = {0: 'input_ids'}
for label_key in label_keys:
__lowerCAmelCase: Optional[Any] = signature_names.index(UpperCAmelCase )
__lowerCAmelCase: Tuple = label_key
__lowerCAmelCase: Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCAmelCase: List[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCAmelCase: Optional[Any] = prepared_for_class[value]
__lowerCAmelCase: Union[str, Any] = tuple(UpperCAmelCase )
# Send to model
__lowerCAmelCase: Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCAmelCase ( self : Dict ) -> Tuple:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Tuple = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> List[str]:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : int ) -> List[str]:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)): Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase: Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def _a ( ) -> Any:
"""simple docstring"""
__lowerCAmelCase: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self : int ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
__lowerCAmelCase: Any = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__lowerCAmelCase: Tuple = self.default_image_processor
__lowerCAmelCase: str = prepare_img()
__lowerCAmelCase: Optional[int] = image_processor(images=UpperCAmelCase , return_tensors='tf' ).pixel_values
__lowerCAmelCase: Dict = tf.constant([[1, 2]] )
__lowerCAmelCase: str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCAmelCase: List[str] = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
__lowerCAmelCase: Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
__lowerCAmelCase: str = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
| 322 | 1 |
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowerCAmelCase: Tuple = True
for i in range(1 , s + 1 ):
__lowerCAmelCase: Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            __lowerCAmelCase: Optional[int] = dp[i - 1][j]
if arr[i - 1] <= j:
__lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowerCAmelCase: Tuple = s - 2 * j
break
return diff
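# A de-obfuscated sketch with the classic example: [1, 6, 11, 5] splits into {1, 5, 6} and
# {11} for a minimum partition difference of 1.
def min_partition_difference(arr):
    n, s = len(arr), sum(arr)
    # dp[i][j] is True iff some subset of the first i elements sums to exactly j
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s
assert min_partition_difference([1, 6, 11, 5]) == 1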
| 322 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_a = logging.get_logger(__name__)
_a = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class A_ :
def __init__( self : int , UpperCAmelCase : Tuple=None , **UpperCAmelCase : Optional[int] ) -> Tuple:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
__lowerCAmelCase: List[str] = model
__lowerCAmelCase: Any = kwargs.get('model_save_dir' , UpperCAmelCase )
__lowerCAmelCase: List[str] = kwargs.get('latest_model_name' , UpperCAmelCase )
def __call__( self : str , **UpperCAmelCase : Dict ) -> str:
__lowerCAmelCase: Optional[int] = {k: np.array(UpperCAmelCase ) for k, v in kwargs.items()}
return self.model.run(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase ( UpperCAmelCase : Union[str, Path] , UpperCAmelCase : List[str]=None , UpperCAmelCase : Tuple=None ) -> int:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
__lowerCAmelCase: Dict = 'CPUExecutionProvider'
return ort.InferenceSession(UpperCAmelCase , providers=[provider] , sess_options=UpperCAmelCase )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Union[str, Path] , UpperCAmelCase : Optional[str] = None , **UpperCAmelCase : List[Any] ) -> Tuple:
__lowerCAmelCase: List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCAmelCase: int = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCAmelCase: Tuple = Path(UpperCAmelCase ).joinpath(UpperCAmelCase )
try:
shutil.copyfile(UpperCAmelCase , UpperCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCAmelCase: str = self.model_save_dir.joinpath(UpperCAmelCase )
if src_path.exists():
__lowerCAmelCase: List[Any] = Path(UpperCAmelCase ).joinpath(UpperCAmelCase )
try:
shutil.copyfile(UpperCAmelCase , UpperCAmelCase )
except shutil.SameFileError:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : Optional[Any] , ) -> Tuple:
if os.path.isfile(UpperCAmelCase ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
# saving model weights/files
self._save_pretrained(UpperCAmelCase , **UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : List[Any] , UpperCAmelCase : Union[str, Path] , UpperCAmelCase : Optional[Union[bool, str, None]] = None , UpperCAmelCase : Optional[Union[str, None]] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional["ort.SessionOptions"] = None , **UpperCAmelCase : int , ) -> Dict:
__lowerCAmelCase: str = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCAmelCase ):
__lowerCAmelCase: Optional[Any] = OnnxRuntimeModel.load_model(
os.path.join(UpperCAmelCase , UpperCAmelCase ) , provider=UpperCAmelCase , sess_options=UpperCAmelCase )
__lowerCAmelCase: str = Path(UpperCAmelCase )
# load model from hub
else:
# download model
__lowerCAmelCase: str = hf_hub_download(
repo_id=UpperCAmelCase , filename=UpperCAmelCase , use_auth_token=UpperCAmelCase , revision=UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , )
__lowerCAmelCase: Optional[Any] = Path(UpperCAmelCase ).parent
__lowerCAmelCase: List[Any] = Path(UpperCAmelCase ).name
__lowerCAmelCase: int = OnnxRuntimeModel.load_model(UpperCAmelCase , provider=UpperCAmelCase , sess_options=UpperCAmelCase )
return cls(model=UpperCAmelCase , **UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : Tuple , UpperCAmelCase : Union[str, Path] , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , **UpperCAmelCase : List[str] , ) -> Dict:
__lowerCAmelCase: Any = None
if len(str(UpperCAmelCase ).split('@' ) ) == 2:
__lowerCAmelCase , __lowerCAmelCase: Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=UpperCAmelCase , revision=UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , use_auth_token=UpperCAmelCase , **UpperCAmelCase , )
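# In `diffusers` this wrapper is exposed as `OnnxRuntimeModel`; a hedged usage sketch (the
# local directory is hypothetical and must contain a model.onnx, e.g. from the export above):
import numpy as np
from diffusers import OnnxRuntimeModel
vae_decoder = OnnxRuntimeModel.from_pretrained("./sd-onnx/vae_decoder", provider="CPUExecutionProvider")
latents = np.random.randn(1, 4, 25, 25).astype(np.float32)
sample = vae_decoder(latent_sample=latents)[0]  # __call__ returns the raw list of session outputs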
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
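
# A minimal sketch (not the actual transformers implementation) of the versioned
# config-file selection the two tests above rely on: among files named
# ``config.X.Y.Z.json``, pick the newest one whose version does not exceed the
# installed library version, and fall back to plain ``config.json`` otherwise.
# The helper name `pick_configuration_file` is made up for this illustration.
from packaging import version


def pick_configuration_file(available, current='4.1.0'):
    best, best_version = 'config.json', version.parse('0.0.0')
    for name in available:
        v = version.parse(name[len('config.'):-len('.json')])  # e.g. "4.0.0"
        if best_version < v <= version.parse(current):
            best, best_version = name, v
    return best


assert pick_configuration_file(['config.4.0.0.json'], '4.1.0') == 'config.4.0.0.json'
assert pick_configuration_file(['config.42.0.0.json'], '4.1.0') == 'config.json'  # too new, fall back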
| 322 | 1 |
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of a directed acyclic
    graph given as an adjacency list, or report that the graph has a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
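
# A quick illustration (the `cyclic_graph` name is ours, not from the original):
# for the DAG above, the call prints [0, 1, 2, 3, 4, 5], since 0 is the only
# vertex with indegree 0 and each pop releases its successors in order.
# A graph in which every vertex has a nonzero indegree never seeds the queue:
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"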
| 322 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increases speed slightly by looking up five digits at a time.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and its member 58 is the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Follow the chain from ``number``, returning True if it ends in 1 and
    False if it ends in 89, memoizing every number seen along the way."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros does not change the digit-square sum, so propagate the result.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Return how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 322 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers*
    library), derived from the GPT-2 tokenizer and using byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
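
# A minimal usage sketch of the special-token layout implemented above, assuming
# network access to the "RUCAIBox/mvp" checkpoint referenced in the maps at the
# top of this file (ids 10..13 below are placeholders, not real vocabulary ids):
if __name__ == '__main__':
    tokenizer = MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
    # single sequence:   <s> A </s>
    single = tokenizer.build_inputs_with_special_tokens([10, 11])
    assert single == [tokenizer.bos_token_id, 10, 11, tokenizer.eos_token_id]
    # pair of sequences: <s> A </s> </s> B </s>
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])
    assert pair == [tokenizer.bos_token_id, 10, 11, tokenizer.eos_token_id,
                    tokenizer.eos_token_id, 12, 13, tokenizer.eos_token_id]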
| 322 |
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the
    number itself.

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
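    # Extra illustration (ours, not from the original file): 5**2 = 25 and
    # 76**2 = 5776 end in 5 and 76 respectively, and non-integer input raises.
    print(is_automorphic_number(5), is_automorphic_number(76))  # True True
    try:
        is_automorphic_number(25.0)
    except TypeError as err:
        print(err)  # Input value of [number=25.0] must be an integer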
| 322 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    'good first issue',
    'feature request',
    'wip',
]
def main() -> None:
    """Close or mark as stale the inactive issues of huggingface/accelerate."""
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue: it has seen 7 days of inactivity since the bot's stale comment.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add a stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
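
# A pure-function restatement of the decision rules in main(), handy for testing
# the policy offline (the helpers and their arguments are ours, not part of the
# original script):
def should_close(days_since_updated: int, days_since_creation: int, bot_commented_last: bool, exempt: bool) -> bool:
    return bot_commented_last and days_since_updated > 7 and days_since_creation >= 30 and not exempt


def should_mark_stale(days_since_updated: int, days_since_creation: int, exempt: bool) -> bool:
    return days_since_updated > 23 and days_since_creation >= 30 and not exempt


assert should_close(8, 30, bot_commented_last=True, exempt=False)
assert not should_mark_stale(10, 45, exempt=False)  # updated too recently to be stale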
if __name__ == "__main__":
main()
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=2, num_choices=4, summary_type='last', use_proj=True,
        scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states))
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states))
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
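
# A standalone, hedged reproduction of the integration check above (assumes the
# multi-gigabyte xlm-mlm-en-2048 checkpoint can be downloaded; greedy decoding
# from the ids for "the president" is expected to loop on the same bigram, as
# the test asserts):
if __name__ == '__main__':
    import torch
    from transformers import XLMWithLMHeadModel

    model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
    input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
    output_ids = model.generate(input_ids, do_sample=False)
    print(output_ids[0].tolist())  # [14, 447] repeated ten times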
| 322 | 1 |