from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
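# How the lazy pattern above behaves at import time -- a hedged sketch, not part of the
# module itself: `_LazyModule` registers the names in `_import_structure` but defers the
# heavy torch/tf/flax imports until an attribute is actually touched.
#
#   from transformers.models.xlm_roberta import XLMRobertaConfig   # cheap: config module only
#   from transformers.models.xlm_roberta import XLMRobertaModel    # first touch triggers the torch import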
def hexagonal_numbers(length: int) -> list:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's installed version (or a given `Version`) against a requirement using `operation` (e.g. ">=")."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
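# Hedged usage sketch: STR_OPERATION_TO_FUNC is assumed to map operator strings such as
# ">=", ">", "==" to the matching functions from the `operator` module, so:
#
#   is_torch_version(">=", "1.12.0")            # True iff the installed torch is >= 1.12.0
#   compare_versions("packaging", ">", "20.0")  # also works from a library name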
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: pick items in decreasing profit/weight order,
    taking a fraction of the last item if it does not fit entirely."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for 1kg of each weight respectively.
    # Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the ratio as consumed so duplicate ratios resolve to the next occurrence
        profit_by_weight[index] = -1
        # check if the weight encountered fits within the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight: 1 === weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered exceeds the remaining capacity, take the
            # required fraction of the item: weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
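# Worked sketch with assumed numbers: profit=[10, 9, 8], weight=[2, 3, 4], max_weight=5.
# profit/weight is [5.0, 3.0, 2.0]; the greedy pass takes item 0 whole (gain 10, limit 2),
# then item 1 whole (gain 19, limit 5); item 2 gets zero remaining capacity, so
# calc_profit([10, 9, 8], [2, 3, 4], 5) == 19.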
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument values for lower and higher must satisfy (lower < higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
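# Hedged non-interactive sketch: calling guess_the_number(1, 1000, 333) directly bisects
# the range and prints the probe sequence
# [500, 250, 375, 312, 343, 327, 335, 331, 333] before reporting 333.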
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
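# Hedged note on running these tests (standard transformers conventions, not part of the
# file itself): the generation check above is gated by @slow, so it only runs when the
# RUN_SLOW environment variable is set, e.g.:
#
#   RUN_SLOW=1 python -m pytest tests/models/ctrl/test_modeling_ctrl.py -k "generate"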
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) combination of the stored residuals, up to order 4
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # project onto the predicted clean signal, then re-noise at the next noise level
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
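# Hedged usage sketch of the scheduler loop; `model` stands in for any network that
# predicts the noise residual, and the shapes are illustrative only:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample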
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
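# Hedged invocation sketch (flag values are illustrative; the flags themselves come from
# TrainingArguments and the dataclasses above):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli \
#       --overwrite_output_dir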
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
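# Hedged invocation sketch (the script filename is an assumption; the flags are the ones
# defined by the argparse block above):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers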
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids ) == max_seq_length
assert len(input_mask ) == max_seq_length
assert len(segment_ids ) == max_seq_length
assert len(label_ids ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' ,example.guid )
logger.info('tokens: %s' ,' '.join([str(x) for x in tokens] ) )
logger.info('input_ids: %s' ,' '.join([str(x) for x in input_ids] ) )
logger.info('input_mask: %s' ,' '.join([str(x) for x in input_mask] ) )
logger.info('segment_ids: %s' ,' '.join([str(x) for x in segment_ids] ) )
logger.info('label_ids: %s' ,' '.join([str(x) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
features.append(
    InputFeatures(
        input_ids=input_ids ,attention_mask=input_mask ,token_type_ids=segment_ids ,label_ids=label_ids ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[InputFeatures]
UpperCAmelCase__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self :Union[str, Any] ,__snake_case :TokenClassificationTask ,__snake_case :str ,__snake_case :PreTrainedTokenizer ,__snake_case :List[str] ,__snake_case :str ,__snake_case :Optional[int] = None ,__snake_case :Tuple=False ,__snake_case :Split = Split.train ,) -> List[Any]:
# Load data features from cache or dataset file
cached_features_file = os.path.join(
__snake_case ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(__snake_case ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '.lock'
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
self.features = torch.load(cached_features_file )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
a__ = token_classification_task.read_examples_from_file(__snake_case ,__snake_case )
# TODO clean up all this to leverage built-in features of tokenizers
self.features = token_classification_task.convert_examples_to_features(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=__snake_case ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features ,cached_features_file )
def __len__( self :List[Any] ) -> str:
return len(self.features )
def __getitem__( self :Tuple ,__snake_case :Any ) -> InputFeatures:
return self.features[__snake_case ]
if is_tf_available():
import tensorflow as tf
class snake_case_ :
UpperCAmelCase__ : List[InputFeatures]
UpperCAmelCase__ : int = -1_0_0
def __init__( self :Optional[Any] ,__snake_case :TokenClassificationTask ,__snake_case :str ,__snake_case :PreTrainedTokenizer ,__snake_case :List[str] ,__snake_case :str ,__snake_case :Optional[int] = None ,__snake_case :Optional[int]=False ,__snake_case :Split = Split.train ,) -> List[str]:
a__ = token_classification_task.read_examples_from_file(__snake_case ,__snake_case )
# TODO clean up all this to leverage built-in features of tokenizers
self.features = token_classification_task.convert_examples_to_features(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=__snake_case ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
self.dataset = tf.data.Dataset.from_generator(
    gen ,({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64) ,(
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) ,)
else:
self.dataset = tf.data.Dataset.from_generator(
    gen ,({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64) ,(
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) ,)
def lowerCamelCase__( self :int ) -> Union[str, Any]:
self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :Optional[int] ) -> Tuple:
return len(self.features )
def __getitem__( self :Union[str, Any] ,__snake_case :int ) -> InputFeatures:
return self.features[__snake_case ]
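# Added sketch (not part of the original file): the label-alignment rule used
# by convert_examples_to_features above, in isolation — the first sub-token of
# a word keeps the word's label id, and continuation sub-tokens receive
# pad_token_label_id so the loss ignores them. The toy tokenizer here is an
# illustrative assumption.
def align_labels_with_subwords(words, word_labels, tokenize, label_map, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, word_labels):
        word_tokens = tokenize(word)
        if len(word_tokens) > 0:  # some tokenizers return [] for a bare space
            tokens.extend(word_tokens)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids


assert align_labels_with_subwords(
    ['Hawking'], ['B-PER'], lambda w: ['Haw', '##king'], {'B-PER': 0}
) == (['Haw', '##king'], [0, -100])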
| 657 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    # Project Euler 173: count square laminae (hollow square outlines) that
    # use at most `limit` tiles.
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must share the parity of the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
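# Added cross-check (assumption: only intended for small limits): enumerate
# the laminae directly as outer**2 - hole**2 tile counts and compare against
# the closed-form loop in `solution` above.
def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * (outer - 1) <= limit:  # the thinnest possible ring still fits
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


if __name__ == "__main__":
    assert brute_force(100) == solution(100)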
| 657 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'can\'t find {path}' )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Any ) -> Dict:
import xla_spawn
tmp_dir = self.get_auto_remove_tmp_dir()
a__ = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(__snake_case ,'argv' ,__snake_case ):
start = time()
xla_spawn.main()
end = time()
result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] ,0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start ,5_00 )
def lowerCamelCase__( self :List[Any] ) -> str:
import xla_spawn
a__ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(__snake_case ,'argv' ,__snake_case ):
xla_spawn.main()
| 657 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
score = f1_score(
    __snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(score) if score.size == 1 else score}
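# Added sketch: the harmonic-mean formula from the description above, checked
# against sklearn on the docstring's binary example.
if __name__ == "__main__":
    from sklearn.metrics import precision_score, recall_score

    refs, preds = [0, 1, 0, 1, 0], [0, 0, 1, 1, 0]
    p = precision_score(refs, preds)  # 0.5: one of the two predicted positives is correct
    r = recall_score(refs, preds)  # 0.5: one of the two true positives is recovered
    assert 2 * (p * r) / (p + r) == f1_score(refs, preds) == 0.5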
| 657 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = XLNetTokenizer
UpperCAmelCase__ : Optional[Any] = XLNetTokenizerFast
UpperCAmelCase__ : int = True
UpperCAmelCase__ : int = True
def lowerCamelCase__( self :Optional[int] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(__snake_case ,keep_accents=__snake_case )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = '<s>'
a__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) ,__snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<unk>' )
self.assertEqual(vocab_keys[1] ,'<s>' )
self.assertEqual(vocab_keys[-1] ,'<eod>' )
self.assertEqual(len(__snake_case ) ,10_06 )
def lowerCamelCase__( self :List[Any] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
tokenizer = XLNetTokenizer(__snake_case ,keep_accents=__snake_case )
a__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__snake_case ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) ,[2_85, 46, 10, 1_70, 3_82] )
a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
a__ = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(__snake_case ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
a__ = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
tokenizer = XLNetTokenizer(__snake_case ,do_lower_case=__snake_case )
a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['▁he', 'll', 'o'] )
def lowerCamelCase__( self :int ) -> List[Any]:
tokenizer = XLNetTokenizer(__snake_case ,do_lower_case=__snake_case )
a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] ,)
@slow
def lowerCamelCase__( self :Optional[Any] ) -> str:
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
text_a = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__( self :Any ) -> Optional[int]:
# fmt: off
a__ = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name='xlnet-base-cased' ,revision='c841166438c31ec7ca9a106dee7bb312b73ae511' ,)
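# Added note (grounded in the ids asserted above): XLNet appends its special
# tokens instead of prepending them, so a single sequence ends with <sep>
# (id 4) and <cls> (id 3), and a pair is laid out as `A <sep> B <sep> <cls>`.
def xlnet_special_token_layout(text_ids, pair_ids=None):
    if pair_ids is None:
        return text_ids + [4, 3]
    return text_ids + [4] + pair_ids + [4, 3]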
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
    "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
    " calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding.' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
no_decay = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
    scheduler = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
    scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
    scheduler = schedule_func(
        self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
inputs = self._prepare_inputs(__snake_case )
gen_kwargs = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
generated_tokens = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens ,gen_kwargs['max_length'] )
labels = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
loss , logits = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
loss = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
logits = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
padded_tensor = pad_token_id * torch.ones(
    (tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
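# The dynamic `from utils import label_smoothed_nll_loss` above refers to a
# local module that is not shown in this file. A sketch of the usual
# implementation (adapted from fairseq; treat the exact module contents as an
# assumption):
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss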
| 657 | 1 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop the second operand from the stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop the first operand from the stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            stack.append(str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
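# Added sanity check (not in the original): "2 6 + 5 *" computes (2 + 6) * 5.
# Note that `solve` prints its trace table as a side effect.
if __name__ == "__main__":
    assert solve('2 6 + 5 *'.split(' ')) == 40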
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def download_prompt(prompt_or_repo_id, agent_name, mode='run'):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name}
    )
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return f.read()
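# Added usage sketch (assumption, mirroring how the transformers Agent fills
# the template): `download_prompt` fetches the repo-hosted template, and the
# `<<task>>` placeholder is substituted with a plain string replace.
if __name__ == "__main__":
    print(CHAT_MESSAGE_PROMPT.replace('<<task>>', 'Summarize this article.'))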
| 657 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    # Runs the candidate program in a subprocess and reports whether it passed.
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out')

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(F'failed: {e}')

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    # StringIO that raises when it is read from.
    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    # Disables destructive functions so the untrusted program cannot interfere
    # with the test (e.g. fork bomb, killing other processes, removing files).
    # WARNING: this is not a security sandbox.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ['OMP_NUM_THREADS'] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__['help'] = None

    import sys

    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
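# Added usage sketch (assumed, not part of the original file): run a single
# candidate program against its test with a 3-second budget.
if __name__ == "__main__":
    program = 'def add(a, b):\n    return a + b\n\nassert add(2, 3) == 5\n'
    print(check_correctness(program, timeout=3.0, task_id='demo/0', completion_id=0))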
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    # Chudnovsky algorithm: each series term contributes roughly 14 digits.
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
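# Added sanity check (assumption): with 15 digits of working precision the
# result should match the textbook digits of pi.
assert pi(15) == '3.1415926535897'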
| 657 | 1 |
def actual_power(a: int, b: int) -> int:
    # Divide-and-conquer exponentiation: halve the exponent at each call.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    # A negative exponent yields the reciprocal of the positive power.
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
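# Added note: `actual_power` recomputes the half power twice per call, so it
# performs O(b) multiplications. A sketch of the usual O(log b) variant for
# non-negative exponents, binding the half power once:
def fast_power(a: int, b: int) -> int:  # assumes b >= 0
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half


assert fast_power(2, 10) == 1_024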
| 657 |
def solution(pence: int = 200) -> int:
    # Project Euler 31: count the ways to make `pence` from standard UK coins.
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
| 657 | 1 |
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # 7 * 9! + 1 bounds the search: an 8-digit number can never equal the sum
    # of the factorials of its digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_electra_fast'] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_electra'] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_electra'] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_electra'] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    # An `angle`-degree arc is angle/360 of the full circumference 2*pi*r.
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
    tokenizer_names = TOKENIZER_CLASSES
else:
    tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
add_prefix = True
if checkpoint_name is None:
    checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
else:
    checkpoint_names = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
    checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
    dump_path_full = os.path.join(dump_path , checkpoint_directory )
elif add_prefix:
    checkpoint_prefix_name = checkpoint
    dump_path_full = dump_path
else:
    checkpoint_prefix_name = None
    dump_path_full = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
next_char = file_path.split(checkpoint )[-1][0]
if next_char == "/":
    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
    checkpoint_prefix_name = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
file_names = tokenizer.save_pretrained(
    dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(file_name )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
    "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
    " calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding.' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
no_decay = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
    scheduler = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
    scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
    scheduler = schedule_func(
        self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
                # compute the usual loss via the model's standard forward pass
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
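

if __name__ == "__main__":
    # Minimal dispatch sketch (editor addition, not part of the original trainer):
    # picks a schedule constructor from `arg_to_scheduler` by name, mirroring
    # `_get_lr_scheduler` above. Assumes the schedule helpers imported at the top
    # of this file; the toy model and step counts are illustrative only.
    import torch

    _model = torch.nn.Linear(4, 4)
    _optimizer = torch.optim.AdamW(_model.parameters(), lr=1e-3)
    _schedule_func = arg_to_scheduler["linear"]
    _scheduler = _schedule_func(_optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        _optimizer.step()
        _scheduler.step()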
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
        self.assertRaisesRegex(__snake_case ,'max_weight must be greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
        self.assertRaisesRegex(__snake_case ,'Weight cannot be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
        self.assertRaisesRegex(__snake_case ,'Profit cannot be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
        self.assertRaisesRegex(__snake_case ,'max_weight must be greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
        self.assertRaisesRegex(
            __snake_case ,'The length of profit and weight must be the same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : int = '''▁'''
snake_case : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
snake_case : Optional[Any] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
snake_case : Tuple = {'''vinai/bartpho-syllable''': 10_24}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self :List[Any] ,__snake_case :List[str] ,__snake_case :int ,__snake_case :Dict="<s>" ,__snake_case :Union[str, Any]="</s>" ,__snake_case :Dict="</s>" ,__snake_case :List[str]="<s>" ,__snake_case :str="<unk>" ,__snake_case :Union[str, Any]="<pad>" ,__snake_case :int="<mask>" ,__snake_case :Optional[Dict[str, Any]] = None ,**__snake_case :str ,) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else mask_token
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case ,eos_token=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,cls_token=__snake_case ,pad_token=__snake_case ,mask_token=__snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**__snake_case ,)
a__ = vocab_file
a__ = monolingual_vocab_file
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
a__ = {}
a__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__snake_case ) not in self.fairseq_tokens_to_ids:
a__ = cnt
cnt += 1
with open(__snake_case ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
a__ = line.strip().split()[0]
a__ = len(self.fairseq_tokens_to_ids )
if str(__snake_case ) not in self.fairseq_tokens_to_ids:
a__ = len(self.fairseq_tokens_to_ids )
a__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Union[str, Any] ) -> Optional[Any]:
a__ = self.__dict__.copy()
a__ = None
a__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Any ,__snake_case :Dict ) -> str:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__( self :Dict ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ = [self.cls_token_id]
a__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__( self :Tuple ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ,__snake_case :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case ,token_ids_a=__snake_case ,already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def lowerCamelCase__( self :List[str] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__( self :str ) -> Optional[Any]:
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> List[str]:
return self.sp_model.encode(__snake_case ,out_type=__snake_case )
def lowerCamelCase__( self :Dict ,__snake_case :Optional[int] ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase__( self :Tuple ,__snake_case :Dict ) -> int:
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase__( self :Dict ,__snake_case :Any ) -> Any:
a__ = ''.join(__snake_case ).replace(__snake_case ,' ' ).strip()
return out_string
def lowerCamelCase__( self :str ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__snake_case ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,__snake_case )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__snake_case ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(__snake_case )} \n' )
return out_vocab_file, out_monolingual_vocab_file
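

if __name__ == "__main__":
    # Self-contained sketch (editor addition) of the reduced-vocab construction in
    # __init__ above: special tokens take the first ids, then the first column of a
    # fairseq `dict.txt`-style file extends the mapping. The two sample lines below
    # are hypothetical stand-ins for the real monolingual vocab file.
    specials = ["<s>", "<pad>", "</s>", "<unk>"]
    token_to_id = {tok: i for i, tok in enumerate(specials)}
    for line in ["▁xin 100", "▁chào 99"]:
        token = line.strip().split()[0]
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    print(token_to_id)  # {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, '▁xin': 4, '▁chào': 5}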
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
            params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,beta1=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # scheduler dict format:
        # function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
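

if __name__ == "__main__":
    # Sketch (editor addition): `LambdaLR.state_dict()` serializes lr_lambdas only
    # when they are callable objects; plain functions and lambdas are stored as
    # None. That is why the wrapper class above converts each lr_lambda into an
    # instance before the save/reload round trip in `unwrap_and_save_reload_schedule`.
    import torch

    _opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    _sched = torch.optim.lr_scheduler.LambdaLR(_opt, lr_lambda=lambda step: 1.0)
    print(_sched.state_dict()["lr_lambdas"])  # [None] - the lambda itself is not captured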
| 657 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = VQModel
UpperCAmelCase__ : Any = '''sample'''
@property
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Any=(32, 32) ) -> Any:
a__ = 4
a__ = 3
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
return {"sample": image}
@property
def lowerCamelCase__( self :List[str] ) -> str:
return (3, 32, 32)
@property
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
return (3, 32, 32)
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
a__ = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
a__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__( self :Dict ) -> List[Any]:
pass
def lowerCamelCase__( self :List[str] ) -> List[str]:
pass
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ , a__ = VQModel.from_pretrained('fusing/vqgan-dummy' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(__snake_case ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
a__ = torch.randn(1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size )
a__ = image.to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ).sample
a__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
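    # Worked example (editor addition): the recursion keeps the better of
    # "drop the pivot" vs "keep the pivot and recurse on the tail >= pivot";
    # this prints one longest non-decreasing subsequence of length 6,
    # e.g. [10, 22, 33, 41, 60, 80].
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))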
| 657 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : List[str] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase__ : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ,__snake_case :Dict ) -> List[str]:
a__ = TextaTextGenerationPipeline(model=__snake_case ,tokenizer=__snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Any ,__snake_case :int ) -> int:
a__ = generator('Something there' )
self.assertEqual(__snake_case ,[{'generated_text': ANY(__snake_case )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
a__ = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=__snake_case )
self.assertEqual(
__snake_case ,[
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] ,)
a__ = generator(
['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=__snake_case )
self.assertEqual(
__snake_case ,[
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] ,)
with self.assertRaises(__snake_case ):
generator(4 )
@require_torch
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
# do_sample=False necessary for reproducibility
a__ = generator('Something there' ,do_sample=__snake_case )
self.assertEqual(__snake_case ,[{'generated_text': ''}] )
a__ = 3
a__ = generator(
'Something there' ,num_return_sequences=__snake_case ,num_beams=__snake_case ,)
a__ = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(__snake_case ,__snake_case )
a__ = generator('This is a test' ,do_sample=__snake_case ,num_return_sequences=2 ,return_tensors=__snake_case )
self.assertEqual(
__snake_case ,[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] ,)
a__ = generator.model.config.eos_token_id
a__ = '<pad>'
a__ = generator(
['This is a test', 'This is a second test'] ,do_sample=__snake_case ,num_return_sequences=2 ,batch_size=2 ,return_tensors=__snake_case ,)
self.assertEqual(
__snake_case ,[
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] ,)
@require_tf
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
# do_sample=False necessary for reproducibility
a__ = generator('Something there' ,do_sample=__snake_case )
self.assertEqual(__snake_case ,[{'generated_text': ''}] )
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
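

if __name__ == "__main__":
    # Quick numeric sketch (editor addition): `resize` above rounds height and
    # width *down* to the nearest multiple of `size_divisor`, so a 486x640 image
    # with size_divisor=32 is resized to 480x640.
    height, width, size_divisor = 486, 640, 32
    print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # 480 640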
| 657 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ):
a__ = HfArgumentParser(__lowerCAmelCase )
a__ = parser.parse_args_into_dataclasses()[0]
a__ = TensorFlowBenchmark(args=__lowerCAmelCase )
try:
a__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a__ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
a__ = ' '.join(str(__lowerCAmelCase ).split(' ' )[:-1] )
a__ = ''
a__ = eval(str(__lowerCAmelCase ).split(' ' )[-1] )
a__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
a__ = full_error_msg + begin_error_msg + str(__lowerCAmelCase )
raise ValueError(__lowerCAmelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : Tuple ): # noqa: E741
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = [0] * n
a__ = [False] * n
a__ = [False] * n
def dfs(__lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
if parent == root:
out_edge_count += 1
a__ = True
a__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
a__ = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
a__ = True
# AP found via cycle
if at == low[to]:
a__ = True
else:
a__ = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
a__ = 0
a__ = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
a__ = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
snake_case : str = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
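# Expected output for the sample graph above (editor addition): vertices 2, 3 and 5
# are printed, since removing any one of them disconnects the graph.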
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from __future__ import annotations
from collections.abc import Callable
snake_case : Optional[Any] = list[list[float | int]]
def __lowercase ( __lowerCAmelCase : Matrix , __lowerCAmelCase : Matrix ):
a__ = len(__lowerCAmelCase )
a__ = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
a__ = matrix[row][col]
a__ = vector[row][0]
a__ = 0
a__ = 0
while row < size and col < size:
# pivoting
        a__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
a__ , a__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
a__ = augmented[rowa][col] / augmented[row][col]
a__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
a__ = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(__lowerCAmelCase )
]
def __lowercase ( __lowerCAmelCase : list[int] ):
a__ = len(__lowerCAmelCase )
a__ = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
a__ = [[0] for _ in range(__lowerCAmelCase )]
a__ = 42
a__ = 42
a__ = 42
a__ = 42
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
a__ = (x_val + 1) ** (size - col - 1)
a__ = y_val
a__ = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowercase ( __lowerCAmelCase : int ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def __lowercase ( __lowerCAmelCase : Callable[[int], int] = question_function , __lowerCAmelCase : int = 1_0 ):
a__ = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
a__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
a__ = 0
a__ = 42
a__ = 42
for poly in polynomials:
a__ = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__lowerCAmelCase , n - 1 , __lowerCAmelCase ) * a) % mod
else:
        a__ = binary_exponentiation(__lowerCAmelCase , n // 2 , __lowerCAmelCase )
return (b * b) % mod
# a prime number
snake_case : str = 7_01
snake_case : Dict = 10_00_00_00_00
snake_case : int = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
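# Cross-check (editor addition): Python's built-in pow computes the same modular
# inverse via Fermat's little theorem, since p is prime.
print(binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p))  # True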
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
    if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
        raise ValueError('The length of profit and weight must be the same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.' )
    if any(p < 0 for p in profit ):
        raise ValueError('Profit cannot be negative.' )
    if any(w < 0 for w in weight ):
        raise ValueError('Weight cannot be negative.' )
    # Compute the profit per unit of weight (profit/weight) for each item.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
    # Loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length.
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
        # check if the remaining capacity is enough to take this item whole
if max_weight - limit >= weight[index]:
limit += weight[index]
            # take the whole item: weight[index]/weight[index] == 1, so add its full profit
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
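    # Worked example (editor addition), matching the unit test earlier in this
    # dump: with capacity 100 every item fits, so the gain is the full profit sum.
    print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210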
| 657 | 1 |
class snake_case_ :
def __init__( self :Union[str, Any] ) -> Union[str, Any]:
a__ = {}
def lowerCamelCase__( self :List[str] ) -> None:
print(self.vertex )
for i in self.vertex:
print(__snake_case ,' -> ' ,' -> '.join([str(__snake_case ) for j in self.vertex[i]] ) )
def lowerCamelCase__( self :List[str] ,__snake_case :int ,__snake_case :int ) -> None:
        # check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__snake_case )
else:
# else make a new vertex
a__ = [to_vertex]
def lowerCamelCase__( self :Optional[Any] ) -> None:
# visited array for storing already visited nodes
a__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :list ) -> None:
# mark start vertex as visited
a__ = True
print(__snake_case ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__snake_case ,__snake_case )
if __name__ == "__main__":
snake_case : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ (lowerCamelCase_ ):
@staticmethod
@abstractmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__( self :int ) -> str:
raise NotImplementedError()
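

if __name__ == "__main__":
    # Self-contained sketch (editor addition) of the pattern above: a concrete
    # command registers an argparse sub-parser and implements run(). All names
    # below are illustrative, not the real transformers-cli commands.
    from argparse import ArgumentParser

    class HelloCommand:
        @staticmethod
        def register_subcommand(subparsers) -> None:
            sub = subparsers.add_parser("hello")
            sub.set_defaults(factory=lambda args: HelloCommand())

        def run(self) -> None:
            print("hello")

    parser = ArgumentParser("cli")
    subparsers = parser.add_subparsers()
    HelloCommand.register_subcommand(subparsers)
    args = parser.parse_args(["hello"])
    args.factory(args).run()  # prints: hello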
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
from functools import lru_cache
def __lowercase ( __lowerCAmelCase : int ):
a__ = 2
a__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__lowerCAmelCase )
if n > 1:
factors.add(__lowerCAmelCase )
return factors
@lru_cache
def __lowercase ( __lowerCAmelCase : int ):
return len(unique_prime_factors(__lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : list ):
return len(set(__lowerCAmelCase ) ) in (0, 1)
def __lowercase ( __lowerCAmelCase : int ):
a__ = 2
while True:
# Increment each value of a generated range
a__ = [base + i for i in range(__lowerCAmelCase )]
        # Run elements through our unique_prime_factors function
        # and append our target number to the end.
a__ = [upf_len(__lowerCAmelCase ) for x in group]
checker.append(__lowerCAmelCase )
# If all numbers in the list are equal, return the group variable.
if equality(__lowerCAmelCase ):
return group
# Increment our base variable by 1
base += 1
def __lowercase ( __lowerCAmelCase : int = 4 ):
a__ = run(__lowerCAmelCase )
return results[0] if len(__lowerCAmelCase ) else None
if __name__ == "__main__":
print(solution())
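    # Quick check (editor addition): the first run of three consecutive integers
    # with three distinct prime factors each is 644, 645, 646.
    print(run(3))  # [644, 645, 646]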
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
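# --- Illustrative sketch (added for clarity; not part of the original scheduler) ---
# The `step` method above blends the most recently stored model outputs with the
# classic 1- to 4-step Adams-Bashforth weights before forming the previous
# sample. That blending in isolation, on plain floats (names are my own):
def adams_bashforth_blend(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

# With two stored outputs the blend extrapolates forward: (3*2 - 1) / 2 == 2.5.
assert adams_bashforth_blend([1.0, 2.0]) == 2.5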
| 657 | 1 |
from math import factorial
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return sum(int(__lowerCAmelCase ) for x in str(factorial(__lowerCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
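# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# The solution above sums the decimal digits of n! (Project Euler 20); the
# same computation spelled out step by step under plain names:
from math import factorial

def digit_sum_of_factorial(n: int) -> int:
    total = 0
    for digit in str(factorial(n)):
        total += int(digit)
    return total

# 10! = 3628800 and 3+6+2+8+8+0+0 == 27.
assert digit_sum_of_factorial(10) == 27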
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
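# --- Illustrative sketch (added for clarity; not part of the original script) ---
# What the CLI above automates for a single checkpoint: load it through the
# fast, tokenizers-backed class and write out a unified tokenizer.json. The
# checkpoint name and dump directory below are example values of my own, and
# running this requires network access to download the tokenizer files.
from transformers import AutoTokenizer

fast_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
fast_tokenizer.save_pretrained("./fast_dump", legacy_format=False)  # tokenizer.json format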
| 657 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[Any] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = '''resnet'''
UpperCAmelCase__ : List[Any] = ['''basic''', '''bottleneck''']
def __init__( self :Optional[int] ,__snake_case :int=3 ,__snake_case :Union[str, Any]=64 ,__snake_case :Any=[2_56, 5_12, 10_24, 20_48] ,__snake_case :Dict=[3, 4, 6, 3] ,__snake_case :str="bottleneck" ,__snake_case :Optional[int]="relu" ,__snake_case :Union[str, Any]=False ,__snake_case :List[str]=None ,__snake_case :Union[str, Any]=None ,**__snake_case :Dict ,) -> Dict:
super().__init__(**__snake_case )
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
a__ = num_channels
a__ = embedding_size
a__ = hidden_sizes
a__ = depths
a__ = layer_type
a__ = hidden_act
a__ = downsample_in_first_stage
a__ = ['stem'] + [F'stage{idx}' for idx in range(1 ,len(__snake_case ) + 1 )]
a__ , a__ = get_aligned_output_features_output_indices(
out_features=__snake_case ,out_indices=__snake_case ,stage_names=self.stage_names )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : int = version.parse('''1.11''' )
@property
def lowerCamelCase__( self :Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__( self :List[str] ) -> float:
return 1E-3
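# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# Minimal round-trip with the public class this config appears to correspond
# to in `transformers` (an inference from the "resnet" model_type and the
# microsoft/resnet-50 URL above):
from transformers import ResNetConfig

config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2])
assert ResNetConfig.from_dict(config.to_dict()).layer_type == "basic"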
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
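# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# Brute-force cross-check of the closed-form count above (Project Euler 173):
# a square lamina with outer side `outer` and hole side `hole` (same parity,
# hole >= 1, outer - hole >= 2) uses outer**2 - hole**2 tiles. Count the
# laminae that use at most `limit` tiles.
def laminae_bruteforce(limit: int) -> int:
    count = 0
    outer = 3
    while outer * outer - (outer - 2) ** 2 <= limit:  # thinnest lamina still fits
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count

# The Project Euler 173 statement quotes 41 laminae for up to one hundred tiles.
assert laminae_bruteforce(100) == 41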
| 657 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
snake_case : Any = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
snake_case : Tuple = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
snake_case : int = '''zero2'''
snake_case : List[str] = '''zero3'''
snake_case : Optional[Any] = [ZEROa, ZEROa]
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
a__ = parameterized.to_safe_name('_'.join(str(__lowerCAmelCase ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
snake_case : List[str] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_ (lowerCamelCase_ ):
@parameterized.expand(__snake_case ,name_func=__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :List[str] ,__snake_case :int ) -> Optional[Any]:
self.run_and_check(
stage=__snake_case ,model=__snake_case ,distributed=__snake_case ,fpaa=__snake_case ,)
@require_torch_multi_gpu
@parameterized.expand(__snake_case ,name_func=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Tuple ) -> Tuple:
self.run_and_check(
stage=__snake_case ,model=__snake_case ,distributed=__snake_case ,fpaa=__snake_case ,)
@parameterized.expand(__snake_case ,name_func=__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :Any ,__snake_case :str ) -> List[Any]:
self.run_and_check(
stage=__snake_case ,model=__snake_case ,distributed=__snake_case ,fpaa=__snake_case ,)
@require_torch_multi_gpu
@parameterized.expand(__snake_case ,name_func=__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Optional[Any]:
self.run_and_check(
stage=__snake_case ,model=__snake_case ,distributed=__snake_case ,fpaa=__snake_case ,)
def lowerCamelCase__( self :Optional[int] ,__snake_case :int ) -> Any:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :int = 10 ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :bool = True ,) -> str:
a__ = models[model]
a__ = self.run_trainer(
stage=__snake_case ,model_name=__snake_case ,eval_steps=__snake_case ,num_train_epochs=1 ,distributed=__snake_case ,fpaa=__snake_case ,)
self.do_checks(__snake_case )
return output_dir
def lowerCamelCase__( self :Any ,__snake_case :str ,__snake_case :str ,__snake_case :int = 10 ,__snake_case :int = 1 ,__snake_case :bool = True ,__snake_case :bool = True ,) -> Tuple:
a__ = self.get_auto_remove_tmp_dir('./xxx' ,after=__snake_case )
a__ = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__snake_case )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
a__ = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
a__ = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
a__ = self.get_launcher(__snake_case )
a__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case ,env=self.get_env() )
return output_dir
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Any=False ) -> Optional[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
a__ = min(2 ,get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case : Dict = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
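# --- Illustrative sketch (added for clarity; not part of the original trainer) ---
# `label_smoothed_nll_loss` is imported from a local `utils` module that is not
# shown here. A common fairseq-style formulation with the same call signature
# as the trainer's usage above (lprobs, labels, epsilon, ignore_index),
# reconstructed as an assumption rather than the project's exact code:
import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (..., vocab_size) log-probabilities; target: (...) gold token ids.
    target = target.unsqueeze(-1)
    # clamp guards a negative ignore_index such as -100 before the gather
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-over-vocab term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    loss = (1.0 - epsilon) * nll_loss + (epsilon / lprobs.size(-1)) * smooth_loss
    return loss, nll_loss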
| 657 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
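# --- Illustrative sketch (added for clarity; not part of the original test) ---
# The integration test above checks greedy generation against fixed token ids.
# The same pattern through the public API; the "ctrl" checkpoint is the one
# the test loads, and it is large, so running this downloads several GB:
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("ctrl")
model = AutoModelForCausalLM.from_pretrained("ctrl")
inputs = tokenizer("Legal the president is", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=False)  # greedy decoding, reproducible
print(tokenizer.decode(output_ids[0]))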
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
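# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The dispatch rule above in isolation: anything containing whitespace is
# treated as a literal prompt, anything else as a Hub repo id to fetch from.
import re

for candidate in ("huggingface-tools/default-prompts", "Summarize <<task>> briefly."):
    kind = "literal prompt" if re.search(r"\s", candidate) else "repo id"
    print(f"{candidate!r} -> {kind}")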
| 657 | 1 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[float] ):
a__ = 0.00
a__ = 0
for resistor in resistors:
if resistor <= 0:
a__ = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(__lowerCAmelCase )
first_sum += 1 / float(__lowerCAmelCase )
index += 1
return 1 / first_sum
def __lowercase ( __lowerCAmelCase : list[float] ):
a__ = 0.00
a__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
a__ = F'Resistor at index {index} has a negative value!'
raise ValueError(__lowerCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
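# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# The two helpers above compute equivalent resistance; restated under plain
# names (my own labels, since the originals are obfuscated) and without the
# input validation:
def resistance_parallel(resistors):
    return 1 / sum(1 / r for r in resistors)

def resistance_series(resistors):
    return sum(resistors)

# Two 4-ohm resistors: 2 ohms in parallel, 8 ohms in series.
assert resistance_parallel([4.0, 4.0]) == 2.0
assert resistance_series([4.0, 4.0]) == 8.0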
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
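# --- Illustrative note (added for clarity; not part of the original sample) ---
# The constants above come from the Chudnovsky series:
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                          / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# Rearranged, pi = 426880*sqrt(10005) / partial_sum, using
# 640320**(3/2) / 12 = 426880*sqrt(10005); the loop's running factor is
# -640320**3. Each term contributes roughly 14 correct digits, hence the
# ceil(precision / 14) iteration count.
assert (-640320) ** 3 == -262537412640768000
assert 640320 == 64 * 10005  # so sqrt(640320) = 8 * sqrt(10005)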
| 657 | 1 |
import os
def __lowercase ( ):
with open(os.path.dirname(__lowerCAmelCase ) + '/p022_names.txt' ) as file:
a__ = str(file.readlines()[0] )
a__ = names.replace('"' , '' ).split(',' )
names.sort()
a__ = 0
a__ = 0
for i, name in enumerate(__lowerCAmelCase ):
for letter in name:
name_score += ord(__lowerCAmelCase ) - 6_4
total_score += (i + 1) * name_score
a__ = 0
return total_score
if __name__ == "__main__":
print(solution())
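# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# The scoring rule above (Project Euler 22), restated under plain names:
# alphabetical value of each name (A=1 ... Z=26) times its 1-based position
# in the sorted list.
def total_name_score(names):
    return sum(
        (position + 1) * sum(ord(letter) - 64 for letter in name)
        for position, name in enumerate(sorted(names))
    )

# "COLIN" has alphabetical value 3+15+12+9+14 = 53; in the real file it sits
# at position 938, scoring 53 * 938 = 49714 (the example quoted in the problem).
assert sum(ord(letter) - 64 for letter in "COLIN") == 53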
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
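# --- Illustrative sketch (added for clarity; not part of the original sample) ---
# The dynamic program above counts coin combinations (Project Euler 31);
# traced on a small target with the coin set restricted to {1, 2, 5}:
def count_ways(coins, pence):
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

# 5p from {1, 2, 5}: 1+1+1+1+1, 1+1+1+2, 1+2+2, 5 -> four ways.
assert count_ways([1, 2, 5], 5) == 4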
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
snake_case : Tuple = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : int = '''tapas'''
def __init__( self :Tuple ,__snake_case :List[str]=3_05_22 ,__snake_case :Optional[Any]=7_68 ,__snake_case :Any=12 ,__snake_case :List[Any]=12 ,__snake_case :Dict=30_72 ,__snake_case :Optional[int]="gelu" ,__snake_case :Optional[int]=0.1 ,__snake_case :int=0.1 ,__snake_case :Optional[Any]=10_24 ,__snake_case :Optional[Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] ,__snake_case :Tuple=0.02 ,__snake_case :Tuple=1E-12 ,__snake_case :Any=0 ,__snake_case :List[Any]=10.0 ,__snake_case :List[Any]=0 ,__snake_case :str=1.0 ,__snake_case :int=None ,__snake_case :Dict=1.0 ,__snake_case :str=False ,__snake_case :Dict=None ,__snake_case :Any=1.0 ,__snake_case :Optional[int]=1.0 ,__snake_case :Any=False ,__snake_case :int=False ,__snake_case :Union[str, Any]="ratio" ,__snake_case :Union[str, Any]=None ,__snake_case :Dict=None ,__snake_case :List[Any]=64 ,__snake_case :Tuple=32 ,__snake_case :Optional[int]=False ,__snake_case :List[str]=True ,__snake_case :Any=False ,__snake_case :List[str]=False ,__snake_case :List[Any]=True ,__snake_case :str=False ,__snake_case :Dict=None ,__snake_case :Dict=None ,**__snake_case :List[Any] ,) -> str:
super().__init__(pad_token_id=__snake_case ,**__snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_sizes
a__ = initializer_range
a__ = layer_norm_eps
# Fine-tuning task hyperparameters
a__ = positive_label_weight
a__ = num_aggregation_labels
a__ = aggregation_loss_weight
a__ = use_answer_as_supervision
a__ = answer_loss_importance
a__ = use_normalized_answer_loss
a__ = huber_loss_delta
a__ = temperature
a__ = aggregation_temperature
a__ = use_gumbel_for_cells
a__ = use_gumbel_for_aggregation
a__ = average_approximation_function
a__ = cell_selection_preference
a__ = answer_loss_cutoff
a__ = max_num_rows
a__ = max_num_columns
a__ = average_logits_per_cell
a__ = select_one_column
a__ = allow_empty_column_selection
a__ = init_cell_selection_weights_to_zero
a__ = reset_position_index_per_cell
a__ = disable_per_token_loss
# Aggregation hyperparameters
a__ = aggregation_labels
a__ = no_aggregation_label_index
if isinstance(self.aggregation_labels ,__snake_case ):
a__ = {int(__snake_case ): v for k, v in aggregation_labels.items()}
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str = '''Speech2TextFeatureExtractor'''
UpperCAmelCase__ : Union[str, Any] = '''Speech2TextTokenizer'''
def __init__( self :str ,__snake_case :List[str] ,__snake_case :List[str] ) -> Union[str, Any]:
super().__init__(__snake_case ,__snake_case )
a__ = self.feature_extractor
a__ = False
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :str ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case ,**__snake_case )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
a__ = kwargs.pop('raw_speech' )
else:
a__ = kwargs.pop('audio' ,__snake_case )
a__ = kwargs.pop('sampling_rate' ,__snake_case )
a__ = kwargs.pop('text' ,__snake_case )
if len(__snake_case ) > 0:
a__ = args[0]
a__ = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
a__ = self.feature_extractor(__snake_case ,*__snake_case ,sampling_rate=__snake_case ,**__snake_case )
if text is not None:
a__ = self.tokenizer(__snake_case ,**__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a__ = encodings['input_ids']
return inputs
def lowerCamelCase__( self :str ,*__snake_case :Union[str, Any] ,**__snake_case :List[Any] ) -> Dict:
return self.tokenizer.batch_decode(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,*__snake_case :Optional[Any] ,**__snake_case :int ) -> Union[str, Any]:
return self.tokenizer.decode(*__snake_case ,**__snake_case )
@contextmanager
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
a__ = True
a__ = self.tokenizer
yield
a__ = self.feature_extractor
a__ = False
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int=True , __lowerCAmelCase : str="pt" ):
a__ = {'add_prefix_space': True} if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not line.startswith(' ' ) else {}
a__ = padding_side
return tokenizer(
[line] , max_length=__lowerCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=None , ):
a__ = input_ids.ne(__lowerCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Dict ,__snake_case :Tuple ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any]="train" ,__snake_case :str=None ,__snake_case :Optional[Any]=None ,__snake_case :List[Any]=None ,__snake_case :str="" ,) -> Any:
super().__init__()
a__ = Path(__snake_case ).joinpath(type_path + '.source' )
a__ = Path(__snake_case ).joinpath(type_path + '.target' )
a__ = self.get_char_lens(self.src_file )
a__ = max_source_length
a__ = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
a__ = tokenizer
a__ = prefix
if n_obs is not None:
a__ = self.src_lens[:n_obs]
a__ = src_lang
a__ = tgt_lang
def __len__( self :Optional[int] ) -> str:
return len(self.src_lens )
def __getitem__( self :Optional[Any] ,__snake_case :int ) -> Dict[str, torch.Tensor]:
a__ = index + 1 # linecache starts at 1
a__ = self.prefix + linecache.getline(str(self.src_file ) ,__snake_case ).rstrip('\n' )
a__ = linecache.getline(str(self.tgt_file ) ,__snake_case ).rstrip('\n' )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,__snake_case ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
a__ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,__snake_case ) else self.tokenizer
)
a__ = self.tokenizer.generator if isinstance(self.tokenizer ,__snake_case ) else self.tokenizer
a__ = encode_line(__snake_case ,__snake_case ,self.max_source_length ,'right' )
a__ = encode_line(__snake_case ,__snake_case ,self.max_target_length ,'right' )
a__ = source_inputs['input_ids'].squeeze()
a__ = target_inputs['input_ids'].squeeze()
a__ = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCamelCase__( __snake_case :List[str] ) -> Dict:
return [len(__snake_case ) for x in Path(__snake_case ).open().readlines()]
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ) -> Dict[str, torch.Tensor]:
a__ = torch.stack([x['input_ids'] for x in batch] )
a__ = torch.stack([x['attention_mask'] for x in batch] )
a__ = torch.stack([x['decoder_input_ids'] for x in batch] )
a__ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,__snake_case )
else self.tokenizer.pad_token_id
)
a__ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,__snake_case )
else self.tokenizer.pad_token_id
)
a__ = trim_batch(__snake_case ,__snake_case )
a__ , a__ = trim_batch(__snake_case ,__snake_case ,attention_mask=__snake_case )
a__ = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
snake_case : List[Any] = getLogger(__name__)
def __lowercase ( __lowerCAmelCase : List[List] ):
return list(itertools.chain.from_iterable(__lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : str ):
a__ = get_git_info()
save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , 'git_log.json' ) )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : str=4 , **__lowerCAmelCase : Dict ):
with open(__lowerCAmelCase , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=__lowerCAmelCase , **__lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] ):
with open(__lowerCAmelCase ) as f:
return json.load(__lowerCAmelCase )
def __lowercase ( ):
a__ = git.Repo(search_parent_directories=__lowerCAmelCase )
a__ = {
'repo_id': str(__lowerCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def __lowercase ( __lowerCAmelCase : Callable , __lowerCAmelCase : Iterable ):
return list(map(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
with open(__lowerCAmelCase , 'wb' ) as f:
return pickle.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple ):
def remove_articles(__lowerCAmelCase : Optional[Any] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : List[str] ):
a__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
a__ = normalize_answer(__lowerCAmelCase ).split()
a__ = normalize_answer(__lowerCAmelCase ).split()
a__ = Counter(__lowerCAmelCase ) & Counter(__lowerCAmelCase )
a__ = sum(common.values() )
if num_same == 0:
return 0
a__ = 1.0 * num_same / len(__lowerCAmelCase )
a__ = 1.0 * num_same / len(__lowerCAmelCase )
a__ = (2 * precision * recall) / (precision + recall)
return fa
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
return normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ):
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
a__ = 0
for hypo, pred in zip(__lowerCAmelCase , __lowerCAmelCase ):
em += exact_match_score(__lowerCAmelCase , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
em /= len(__lowerCAmelCase )
return {"em": em}
def __lowercase ( __lowerCAmelCase : int ):
return model_prefix.startswith('rag' )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ):
a__ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
a__ = 'dropout_rate'
for p in extra_params:
if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not hasattr(__lowerCAmelCase , __lowerCAmelCase ) and not hasattr(__lowerCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__lowerCAmelCase ) )
delattr(__lowerCAmelCase , __lowerCAmelCase )
continue
a__ = p if hasattr(__lowerCAmelCase , __lowerCAmelCase ) else equivalent_param[p]
setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
delattr(__lowerCAmelCase , __lowerCAmelCase )
return hparams, config
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Dict ,__snake_case :List[Any] ,__snake_case :str ,__snake_case :List[Any] ) -> Union[str, Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
a__ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__snake_case ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step ,3 )
self.assertEqual(len(accumulator.gradients ) ,1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[-2.0, 5.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2 )
def lowerCamelCase__( self :Any ) -> Any:
a__ = None
ops.enable_eager_execution_internal()
a__ = tf.config.list_physical_devices('CPU' )
if len(__snake_case ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] ,[tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
a__ = tf.config.list_logical_devices(device_type='CPU' )
a__ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
a__ = GradientAccumulator()
a__ = tf.Variable([4.0, 3.0] )
a__ , a__ = create_optimizer(5E-5 ,10 ,5 )
a__ = tf.Variable([0.0, 0.0] ,trainable=__snake_case )
def accumulate_on_replica(__snake_case :Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable] ) ) )
@tf.function
def accumulate(__snake_case :Tuple ,__snake_case :List[str] ):
with strategy.scope():
a__ = strategy.experimental_local_results(__snake_case )
local_variables[0].assign(__snake_case )
local_variables[1].assign(__snake_case )
strategy.run(__snake_case ,args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__snake_case )
def _check_local_values(__snake_case :Any ,__snake_case :Any ):
a__ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() ,__snake_case ,tol=1E-2 )
self.assertListAlmostEqual(values[1].value() ,__snake_case ,tol=1E-2 )
accumulate([1.0, 2.0] ,[-1.0, 1.0] )
accumulate([3.0, -1.0] ,[-1.0, -1.0] )
accumulate([-2.0, 2.0] ,[3.0, -2.0] )
self.assertEqual(accumulator.step ,3 )
_check_local_values([2.0, 3.0] ,[1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
_check_local_values([0.0, 0.0] ,[0.0, 0.0] )
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = IFPipeline
UpperCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : int = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCamelCase__( self :Any ) -> List[Any]:
return self._get_dummy_components()
def lowerCamelCase__( self :List[Any] ,__snake_case :int ,__snake_case :int=0 ) -> Optional[Any]:
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__( self :Dict ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' ,reason='float16 requires CUDA' )
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase__( self :Any ) -> List[str]:
self._test_save_load_local()
def lowerCamelCase__( self :List[str] ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[str] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Dict ) -> List[Any]:
# if
a__ = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' ,variant='fp16' ,torch_dtype=torch.floataa )
a__ = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' ,variant='fp16' ,torch_dtype=torch.floataa ,text_encoder=__snake_case ,tokenizer=__snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
a__ , a__ = pipe_a.encode_prompt('anime turtle' ,device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a__ = None
a__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__snake_case ,__snake_case ,__snake_case ,__snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a__ = IFImgaImgPipeline(**pipe_a.components )
a__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__snake_case ,__snake_case ,__snake_case ,__snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a__ = IFInpaintingPipeline(**pipe_a.components )
a__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__snake_case ,__snake_case ,__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :Optional[Any] ,__snake_case :int ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,num_inference_steps=2 ,generator=__snake_case ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,image=__snake_case ,generator=__snake_case ,num_inference_steps=2 ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :str ,__snake_case :Any ,__snake_case :Union[str, Any] ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,image=__snake_case ,num_inference_steps=2 ,generator=__snake_case ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,image=__snake_case ,original_image=__snake_case ,generator=__snake_case ,num_inference_steps=2 ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Dict ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :Dict ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(1 ) ).to(__snake_case )
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,image=__snake_case ,mask_image=__snake_case ,num_inference_steps=2 ,generator=__snake_case ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(0 ) ).to(__snake_case )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(1 ) ).to(__snake_case )
a__ = pipe_a(
prompt_embeds=__snake_case ,negative_prompt_embeds=__snake_case ,image=__snake_case ,mask_image=__snake_case ,original_image=__snake_case ,generator=__snake_case ,num_inference_steps=2 ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case ,__snake_case )
def __lowercase ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
snake_case : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
snake_case : Optional[int] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
    a__ = os.path.abspath(os.path.dirname(__file__ ) )
    a__ = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands ) as file_hand:
        for line in file_hand:
            a__ = line[:1_4].strip()
            a__ = line[1_5:].strip()
            a__ , a__ = PokerHand(player_hand ), PokerHand(opponent_hand )
            a__ = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from __future__ import annotations
from collections.abc import Generator
def __lowercase ( ):
a__ = {}
a__ = 2
while True:
        a__ = factor_map.pop(prime , None )
        if factor:
            a__ = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
yield prime
prime += 1
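# Illustrative check of the incremental sieve above (function name as used at
# the call site below; not part of the original file):
# >>> from itertools import islice
# >>> list(islice(sieve(), 5))
# [2, 3, 5, 7, 11]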
def __lowercase ( __lowerCAmelCase : float = 1E1_0 ):
a__ = sieve()
a__ = 1
while True:
        a__ = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
n += 2
if __name__ == "__main__":
print(solution())
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
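# Sanity check (illustrative): hexagonal numbers satisfy h(n) = n * (2n - 1),
# so hexagonal_numbers(length=5) returns [0, 1, 6, 15, 28] (starting at n = 0).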
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case : List[str] = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case : List[str] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = '''maskformer'''
UpperCAmelCase__ : Any = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ : Union[str, Any] = ['''resnet''', '''swin''']
UpperCAmelCase__ : Any = ['''detr''']
def __init__( self :List[str] ,__snake_case :int = 2_56 ,__snake_case :int = 2_56 ,__snake_case :float = 0.1 ,__snake_case :bool = False ,__snake_case :Optional[Dict] = None ,__snake_case :Optional[Dict] = None ,__snake_case :float = 0.02 ,__snake_case :float = 1.0 ,__snake_case :float = 1.0 ,__snake_case :float = 1.0 ,__snake_case :float = 20.0 ,__snake_case :Optional[bool] = None ,**__snake_case :List[Any] ,) -> Dict:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
a__ = SwinConfig(
image_size=3_84 ,in_channels=3 ,patch_size=4 ,embed_dim=1_28 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(__snake_case ,__snake_case ):
a__ = backbone_config.pop('model_type' )
a__ = CONFIG_MAPPING[backbone_model_type]
a__ = config_class.from_dict(__snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
a__ = DetrConfig()
else:
# verify that the decoder is supported
a__ = (
decoder_config.pop('model_type' ) if isinstance(__snake_case ,__snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'Transformer Decoder {decoder_type} not supported, please use one of'
F' {",".join(self.decoders_supported )}' )
if isinstance(__snake_case ,__snake_case ):
a__ = CONFIG_MAPPING[decoder_type]
a__ = config_class.from_dict(__snake_case )
a__ = backbone_config
a__ = decoder_config
# main feature dimension for the model
a__ = fpn_feature_size
a__ = mask_feature_size
# initializer
a__ = init_std
a__ = init_xavier_std
# Hungarian matcher && loss
a__ = cross_entropy_weight
a__ = dice_weight
a__ = mask_weight
a__ = use_auxiliary_loss
a__ = no_object_weight
a__ = output_auxiliary_logits
a__ = self.decoder_config.encoder_attention_heads
a__ = self.decoder_config.num_hidden_layers
super().__init__(**__snake_case )
@classmethod
def lowerCamelCase__( cls :Union[str, Any] ,__snake_case :PretrainedConfig ,__snake_case :PretrainedConfig ,**__snake_case :Any ) -> str:
return cls(
backbone_config=__snake_case ,decoder_config=__snake_case ,**__snake_case ,)
def lowerCamelCase__( self :Dict ) -> Dict[str, any]:
a__ = copy.deepcopy(self.__dict__ )
a__ = self.backbone_config.to_dict()
a__ = self.decoder_config.to_dict()
a__ = self.__class__.model_type
return output
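# Illustrative construction (not part of this file): explicit backbone and
# decoder configs can be combined through the classmethod above, e.g.
# config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())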
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
    a__ = sorted(profit_by_weight )
# declaring useful variables
    a__ = len(profit_by_weight )
a__ = 0
a__ = 0
a__ = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or until
    # every item has been considered (i == length)
    while limit <= max_weight and i < length:
        # largest profit/weight value still marked as unused
        a__ = sorted_profit_by_weight[length - i - 1]
        a__ = profit_by_weight.index(biggest_profit_by_weight )
        # mark the chosen item as used so .index() skips it on later passes
        profit_by_weight[index] = -1
        # check whether the whole item fits into the remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the fraction taken is
            # weight[index] / weight[index] == 1
gain += 1 * profit[index]
else:
            # The item does not fit, so take only the remaining capacity and
            # add the proportional share of its profit:
            # (remaining weight / weight[index]) * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
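# Worked example (toy values, assumed for illustration): with profit=[10, 9, 8],
# weight=[5, 6, 4] and max_weight=10, the densities are [2.0, 1.5, 2.0]; items
# 0 and 2 are taken whole (profit 18, weight 9) and the remaining 1 kg takes
# 1/6 of item 1, so calc_profit([10, 9, 8], [5, 6, 4], 10) == 18 + 1.5 == 19.5.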
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case_ :
@staticmethod
def lowerCamelCase__( *__snake_case :Tuple ,**__snake_case :Tuple ) -> List[str]:
pass
def __lowercase ( __lowerCAmelCase : Image ):
    a__ = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase__( self :List[str] ,__snake_case :List[str] ,__snake_case :str ,__snake_case :int ) -> List[str]:
a__ = DepthEstimationPipeline(model=__snake_case ,image_processor=__snake_case )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__( self :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :int ) -> str:
a__ = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} ,__snake_case )
import datasets
a__ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
a__ = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] ,__snake_case ,)
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
pass
@slow
@require_torch
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = 'Intel/dpt-large'
        a__ = pipeline('depth-estimation' ,model=model_id )
a__ = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
a__ = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) ,29.3_04 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) ,2.6_62 )
@require_torch
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
        # It is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
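# e.g. a 90 degree arc of a circle of radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708, matching the call below.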
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
        a__ = ConfigTester(self ,config_class=CTRLConfig ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        a__ = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=torch_device ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        a__ = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 657 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case : str = logging.getLogger()
def __lowercase ( __lowerCAmelCase : Path , __lowerCAmelCase : list ):
a__ = '\n'.join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open('w' ).writelines(__lowerCAmelCase )
snake_case : Dict = '''patrickvonplaten/t5-tiny-random'''
snake_case : Union[str, Any] = '''sshleifer/bart-tiny-random'''
snake_case : Tuple = '''sshleifer/tiny-mbart'''
snake_case : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :str ,__snake_case :List[Any] ) -> Optional[int]:
a__ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
a__ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
a__ = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name ,articles )
a__ = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
a__ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
a__ = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
        with patch.object(sys ,'argv' ,testargs ):
run_generate()
        assert Path(output_file_name ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase__( self :Any ) -> int:
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ) -> Optional[int]:
self.run_eval_tester(__snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any] ) -> Any:
a__ = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
a__ = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
a__ = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
a__ = Path(self.get_auto_remove_tmp_dir() )
a__ = str(tmp_dir / 'scores.json' )
a__ = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name ,text['en'] )
        _dump_articles(reference_path ,text['de'] )
a__ = 'translation_en_to_de' if model == T5_TINY else 'summarization'
a__ = F'\n run_eval_search.py\n {model}\n {str(__snake_case )}\n {str(__snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys ,'argv' ,testargs ):
with CaptureStdout() as cs:
run_search()
a__ = [' num_beams | length_penalty', model, 'Best score args']
a__ = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
            expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            a__ = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
        a__ = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
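        # Linear multistep coefficients below: 1st-3rd order warm-up steps,
        # then the 4th order Adams-Bashforth formula once four model outputs
        # (`ets`) have been collected, as used by F-PNDM.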
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        a__ = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
        a__ = (sample - sigma * ets) / max(alpha ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
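# Rough usage sketch (illustrative only; `unet` stands in for any model that
# predicts the diffusion residual with matching tensor shapes):
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     residual = unet(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample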
| 657 | 1 |
from __future__ import annotations
import requests
def __lowercase ( __lowerCAmelCase : str ):
a__ = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()
def __lowercase ( __lowerCAmelCase : int = 1_0 ):
a__ = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    a__ = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def __lowercase ( __lowerCAmelCase : int = 1_0 ):
a__ = hackernews_top_stories(__lowerCAmelCase )
return "\n".join('* [{title}]({url})'.format(**__lowerCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
snake_case : Any = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
        a__ = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
            a__ = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
                a__ = os.path.join(dump_path , checkpoint_directory )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                a__ = file_path.split(checkpoint )[-1][0]
if next_char == "/":
                    a__ = os.path.join(dump_path_full , checkpoint_prefix_name )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            a__ = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
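    # Example invocation (names and paths illustrative, not from this file):
    # python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
    #     --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers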
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : str = logging.get_logger(__name__)
snake_case : Tuple = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : int = '''markuplm'''
def __init__( self :List[str] ,__snake_case :Union[str, Any]=3_05_22 ,__snake_case :Dict=7_68 ,__snake_case :int=12 ,__snake_case :int=12 ,__snake_case :Tuple=30_72 ,__snake_case :Union[str, Any]="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :int=5_12 ,__snake_case :Dict=2 ,__snake_case :str=0.02 ,__snake_case :int=1E-12 ,__snake_case :Optional[int]=0 ,__snake_case :Union[str, Any]=0 ,__snake_case :Union[str, Any]=2 ,__snake_case :Optional[Any]=2_56 ,__snake_case :List[Any]=10_24 ,__snake_case :Optional[Any]=2_16 ,__snake_case :int=10_01 ,__snake_case :List[Any]=32 ,__snake_case :int=50 ,__snake_case :List[Any]="absolute" ,__snake_case :int=True ,__snake_case :Any=None ,**__snake_case :Optional[Any] ,) -> int:
super().__init__(
pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case ,)
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = position_embedding_type
a__ = use_cache
a__ = classifier_dropout
# additional properties
a__ = max_depth
a__ = max_xpath_tag_unit_embeddings
a__ = max_xpath_subs_unit_embeddings
a__ = tag_pad_id
a__ = subs_pad_id
a__ = xpath_unit_hidden_size
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
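    # Counts square laminae built from at most `limit` tiles: an outer square
    # of width w with a centred hole of width h (same parity, 1 <= h <= w - 2)
    # uses w**2 - h**2 tiles, so the smallest legal hole satisfies
    # h >= sqrt(w**2 - limit).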
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case_ :
def __init__( self :str ,__snake_case :int ,__snake_case :Tuple=2 ,__snake_case :List[Any]=3 ,__snake_case :Union[str, Any]=4 ,__snake_case :Dict=2 ,__snake_case :List[Any]=7 ,__snake_case :int=True ,__snake_case :Union[str, Any]=True ,__snake_case :Union[str, Any]=True ,__snake_case :Optional[int]=True ,__snake_case :str=99 ,__snake_case :Any=36 ,__snake_case :Optional[int]=3 ,__snake_case :Union[str, Any]=4 ,__snake_case :Any=37 ,__snake_case :Any="gelu" ,__snake_case :Dict=0.1 ,__snake_case :List[str]=0.1 ,__snake_case :Optional[Any]=5_12 ,__snake_case :List[Any]=16 ,__snake_case :int=2 ,__snake_case :Optional[int]=0.02 ,__snake_case :Union[str, Any]=6 ,__snake_case :Dict=6 ,__snake_case :List[Any]=3 ,__snake_case :Dict=4 ,__snake_case :Dict=None ,__snake_case :Optional[Any]=10_00 ,) -> Optional[Any]:
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = image_size
a__ = patch_size
a__ = text_seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = coordinate_size
a__ = shape_size
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ = text_seq_length
a__ = (image_size // patch_size) ** 2 + 1
a__ = self.text_seq_length + self.image_seq_length
def lowerCamelCase__( self :str ) -> List[str]:
a__ = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
a__ = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ = bbox[i, j, 3]
a__ = bbox[i, j, 1]
a__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ = bbox[i, j, 2]
a__ = bbox[i, j, 0]
a__ = t
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.text_seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
a__ = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__( self :Tuple ,__snake_case :int ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,__snake_case :int ,__snake_case :Union[str, Any] ,__snake_case :int ,__snake_case :List[Any] ) -> Union[str, Any]:
a__ = LayoutLMvaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
# text + image
a__ = model(__snake_case ,pixel_values=__snake_case )
a__ = model(
__snake_case ,bbox=__snake_case ,pixel_values=__snake_case ,attention_mask=__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case ,bbox=__snake_case ,pixel_values=__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case ,bbox=__snake_case ,pixel_values=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
a__ = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a__ = model(pixel_values=__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any] ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :int ) -> List[Any]:
a__ = self.num_labels
a__ = LayoutLMvaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(
__snake_case ,bbox=__snake_case ,pixel_values=__snake_case ,attention_mask=__snake_case ,token_type_ids=__snake_case ,labels=__snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Dict ) -> Optional[Any]:
a__ = self.num_labels
a__ = LayoutLMvaForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(
__snake_case ,bbox=__snake_case ,pixel_values=__snake_case ,attention_mask=__snake_case ,token_type_ids=__snake_case ,labels=__snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__( self :List[str] ,__snake_case :Any ,__snake_case :Optional[Any] ,__snake_case :str ,__snake_case :Tuple ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ) -> Dict:
a__ = LayoutLMvaForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(
__snake_case ,bbox=__snake_case ,pixel_values=__snake_case ,attention_mask=__snake_case ,token_type_ids=__snake_case ,start_positions=__snake_case ,end_positions=__snake_case ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
a__ = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase__( self :List[str] ,__snake_case :int ,__snake_case :List[str] ,__snake_case :int ,__snake_case :Union[str, Any] ,__snake_case :int ) -> Tuple:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ = LayoutLMvaModelTester(self )
        a__ = ConfigTester(self ,config_class=LayoutLMvaConfig ,hidden_size=37 )
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any] ,__snake_case :Dict ,__snake_case :Union[str, Any]=False ) -> Tuple:
a__ = copy.deepcopy(__snake_case )
if model_class in get_values(__snake_case ):
a__ = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(__snake_case ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__snake_case ):
a__ = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
elif model_class in get_values(__snake_case ):
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
elif model_class in [
*get_values(__snake_case ),
]:
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
elif model_class in [
*get_values(__snake_case ),
]:
a__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=__snake_case ,)
return inputs_dict
def lowerCamelCase__( self :str ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase__( self :str ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase__( self :List[str] ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def lowerCamelCase__( self :Any ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def lowerCamelCase__( self :Any ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def lowerCamelCase__( self :Optional[int] ) -> int:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowercase ( ):
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Dict ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=__snake_case ) if is_vision_available() else None
@slow
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
        a__ = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(torch_device )
        a__ = self.default_image_processor
        a__ = prepare_img()
        a__ = image_processor(images=image ,return_tensors='pt' ).pixel_values.to(torch_device )
        a__ = torch.tensor([[1, 2]] )
        a__ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        a__ = model(
            input_ids=input_ids.to(torch_device ) ,bbox=bbox.to(torch_device ) ,pixel_values=pixel_values.to(torch_device ) ,)
        # verify the logits
        a__ = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        a__ = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1E-4 ) )
| 657 |
from sklearn.metrics import f1_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
        a__ = f1_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = BarthezTokenizer
UpperCAmelCase__ : List[Any] = BarthezTokenizerFast
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Any = True
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
super().setUp()
a__ = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=False )
a__ = tokenizer
def lowerCamelCase__( self :Tuple ) -> Any:
a__ = '<pad>'
a__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) ,token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) ,token )
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
        self.assertEqual(len(vocab_keys ) ,10_11_22 )
def lowerCamelCase__( self :Tuple ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size ,10_11_22 )
@require_torch
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = [0, 57, 30_18, 7_03_07, 91, 2]
        a__ = self.tokenizer(
            src_text ,max_length=len(expected_src_tokens ) ,padding=True ,truncation=True ,return_tensors='pt' )
        self.assertIsInstance(batch ,BatchEncoding )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
a__ = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens ,result )
def lowerCamelCase__( self :Any ) -> List[Any]:
if not self.test_rust_tokenizer:
return
a__ = self.get_tokenizer()
a__ = self.get_rust_tokenizer()
a__ = 'I was born in 92000, and this is falsé.'
        a__ = tokenizer.tokenize(sequence )
        a__ = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens ,rust_tokens )
        a__ = tokenizer.encode(sequence ,add_special_tokens=False )
        a__ = rust_tokenizer.encode(sequence ,add_special_tokens=False )
        self.assertListEqual(ids ,rust_ids )
        a__ = self.get_rust_tokenizer()
        a__ = tokenizer.encode(sequence )
        a__ = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids ,rust_ids )
@slow
def lowerCamelCase__( self :Dict ) -> Any:
# fmt: off
a__ = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
a__ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=__snake_case ,)
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding.' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
                # compute the usual loss via the model
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
        # If a PAD token is not defined, at least the EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 1 |
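The optimizer setup above splits parameters into two weight-decay groups. A minimal standalone sketch of that grouping, assuming PyTorch is installed (the tiny model and hyperparameters are illustrative):

import torch
from torch import nn

class TinyModel(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = TinyModel()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {  # everything else gets weight decay
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {  # biases and LayerNorm weights are exempt
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=3e-5)
# only dense.weight lands in the decayed group here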
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
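The recursion above repeatedly picks a pivot and branches on whether to keep it. For contrast, a readable O(n^2) dynamic-programming sketch that returns one longest non-decreasing subsequence:

from __future__ import annotations

def longest_non_decreasing_subsequence(array: list[int]) -> list[int]:
    if not array:
        return []
    # best[i] = a longest qualifying subsequence ending at index i
    best: list[list[int]] = [[x] for x in array]
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [array[i]]
    return max(best, key=len)

assert longest_non_decreasing_subsequence([10, 22, 9, 33, 21, 50, 41, 60]) == [10, 22, 33, 50, 60]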
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
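The download helper above decides between "literal prompt" and "Hub repo ID" with a single whitespace check. A quick sketch of that heuristic:

import re

def looks_like_prompt(prompt_or_repo_id: str) -> bool:
    # a repo ID never contains whitespace; a real prompt almost always does
    return re.search(r"\s", prompt_or_repo_id) is not None

assert looks_like_prompt("Answer the following question: <<task>>")
assert not looks_like_prompt("huggingface-tools/default-prompts")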
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case : Optional[Any] = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
snake_case : Tuple = {'''facebook/blenderbot_small-90M''': 5_12}
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
a__ = set()
a__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a__ = char
a__ = set(__lowerCAmelCase )
return pairs
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : int = ['''input_ids''', '''attention_mask''']
def __init__( self :str ,__snake_case :Any ,__snake_case :Tuple ,__snake_case :Optional[int]="__start__" ,__snake_case :List[str]="__end__" ,__snake_case :Tuple="__unk__" ,__snake_case :Dict="__null__" ,**__snake_case :int ,) -> List[Any]:
super().__init__(unk_token=__snake_case ,bos_token=__snake_case ,eos_token=__snake_case ,pad_token=__snake_case ,**__snake_case )
with open(__snake_case ,encoding='utf-8' ) as vocab_handle:
a__ = json.load(__snake_case )
a__ = {v: k for k, v in self.encoder.items()}
with open(__snake_case ,encoding='utf-8' ) as merges_handle:
a__ = merges_handle.read().split('\n' )[1:-1]
a__ = [tuple(merge.split() ) for merge in merges]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = {}
@property
def lowerCamelCase__( self :Optional[Any] ) -> int:
return len(self.encoder )
def lowerCamelCase__( self :Dict ) -> Dict:
return dict(self.encoder ,**self.added_tokens_encoder )
def lowerCamelCase__( self :str ,__snake_case :str ) -> str:
if token in self.cache:
return self.cache[token]
a__ = re.sub('([.,!?()])' ,R' \1' ,__snake_case )
a__ = re.sub('(\')' ,R' \1 ' ,__snake_case )
a__ = re.sub(R'\s{2,}' ,' ' ,__snake_case )
if "\n" in token:
a__ = token.replace('\n' ,' __newln__' )
a__ = token.split(' ' )
a__ = []
for token in tokens:
if not len(__snake_case ):
continue
a__ = token.lower()
a__ = tuple(__snake_case )
a__ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
a__ = get_pairs(__snake_case )
if not pairs:
words.append(__snake_case )
continue
while True:
a__ = min(__snake_case ,key=lambda __snake_case : self.bpe_ranks.get(__snake_case ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ = bigram
a__ = []
a__ = 0
while i < len(__snake_case ):
try:
a__ = word.index(__snake_case ,__snake_case )
new_word.extend(word[i:j] )
a__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ = tuple(__snake_case )
a__ = new_word
if len(__snake_case ) == 1:
break
else:
a__ = get_pairs(__snake_case )
a__ = '@@ '.join(__snake_case )
a__ = word[:-4]
a__ = word
words.append(__snake_case )
return " ".join(__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :str ) -> List[str]:
a__ = []
a__ = re.findall(R'\S+\n?' ,__snake_case )
for token in words:
split_tokens.extend(list(self.bpe(__snake_case ).split(' ' ) ) )
return split_tokens
def lowerCamelCase__( self :List[str] ,__snake_case :str ) -> int:
a__ = token.lower()
return self.encoder.get(__snake_case ,self.encoder.get(self.unk_token ) )
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> str:
return self.decoder.get(__snake_case ,self.unk_token )
def lowerCamelCase__( self :Tuple ,__snake_case :List[str] ) -> str:
a__ = ' '.join(__snake_case ).replace('@@ ' ,'' ).strip()
return out_string
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__snake_case ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__snake_case ,ensure_ascii=__snake_case ) + '\n' )
a__ = 0
with open(__snake_case ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
a__ = token_index
writer.write(' '.join(__snake_case ) + '\n' )
index += 1
return vocab_file, merge_file
| 657 |
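The heart of the tokenizer above is the BPE loop: keep fusing the adjacent symbol pair with the lowest merge rank until no learned merge applies. A stripped-down sketch with a toy merge table:

from __future__ import annotations

def bpe(word: tuple[str, ...], ranks: dict[tuple[str, str], int]) -> tuple[str, ...]:
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if bigram not in ranks:
            break  # no learned merge applies
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

# 'l'+'o' merges first (rank 0), then 'lo'+'w' (rank 1): ('low',)
print(bpe(("l", "o", "w"), {("l", "o"): 0, ("lo", "w"): 1}))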
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
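A de-obfuscated, runnable sketch of the Chudnovsky series in the snippet above; each term contributes roughly 14 correct digits, hence ceil(precision / 14) iterations (the readable names are mine):

from decimal import Decimal, getcontext
from math import ceil, factorial

def chudnovsky_pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    if precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    iterations = ceil(precision / 14)  # each term adds ~14.18 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    multinomial_term = 1
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # drop the last digit, which may be off due to rounding
    return str(constant_term / partial_sum)[:-1]

print(chudnovsky_pi(50))  # 3.14159265358979323846... (first digits of pi)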
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowercase ( ):
a__ = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=__lowerCAmelCase )
a__ = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowerCAmelCase )
env_command_parser(subparsers=__lowerCAmelCase )
launch_command_parser(subparsers=__lowerCAmelCase )
tpu_command_parser(subparsers=__lowerCAmelCase )
test_command_parser(subparsers=__lowerCAmelCase )
# Let's go
a__ = parser.parse_args()
if not hasattr(__lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 657 |
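The CLI above follows the standard argparse subcommand pattern: each subcommand registers its own parser and stores a callable under `func`, and `main` dispatches to it. A minimal self-contained sketch of that pattern:

from argparse import ArgumentParser

def greet_command_parser(subparsers):
    parser = subparsers.add_parser("greet", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

def main():
    parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    greet_command_parser(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    main()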
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
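A readable version of the coin-combination DP above (Project Euler 31). Iterating coins in the outer loop counts unordered combinations rather than ordered sequences:

def count_coin_combinations(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0 pence: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

assert count_coin_combinations(200) == 73682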
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
a__ = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
a__ = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
a__ = F'{src_lang}-{tgt_lang}'
a__ = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
a__ = os.path.join(__lowerCAmelCase , 'README.md' )
print(F'Generating {path}' )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(__lowerCAmelCase )
# make sure we are under the root of the project
snake_case : Any = Path(__file__).resolve().parent.parent.parent
snake_case : int = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case : List[str] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def __lowercase ( __lowerCAmelCase : Iterable[str] , __lowerCAmelCase : int ):
a__ = iter(__lowerCAmelCase )
while True:
a__ = tuple(itertools.islice(__lowerCAmelCase , __lowerCAmelCase ) )
if not chunk:
return
yield chunk
def __lowercase ( __lowerCAmelCase : str ):
a__ = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
a__ = ''
if len(__lowerCAmelCase ) < 2:
return dirty
for i in range(len(__lowerCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__lowerCAmelCase ) & 1:
clean += "X"
return clean
def __lowercase ( __lowerCAmelCase : str ):
    # J is omitted so the remaining 25 letters fit a 5x5 table
    # (classically, J is folded into I)
a__ = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
a__ = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__lowerCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__lowerCAmelCase )
return table
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
a__ = generate_table(__lowerCAmelCase )
a__ = prepare_input(__lowerCAmelCase )
a__ = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
a__ , a__ = divmod(table.index(__lowerCAmelCase ) , 5 )
a__ , a__ = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
a__ = generate_table(__lowerCAmelCase )
a__ = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
a__ , a__ = divmod(table.index(__lowerCAmelCase ) , 5 )
a__ , a__ = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 657 |
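A self-contained check of the Playfair building blocks above, using the classic key "playfair example" (the helper names here are mine, not the module's):

from __future__ import annotations

def generate_table(key: str) -> list[str]:
    # key letters fill the 5x5 grid first, then the rest of the alphabet
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    table: list[str] = []
    for char in key.upper() + alphabet:
        if char in alphabet and char not in table:
            table.append(char)
    return table

def encode_pair(a: str, b: str, table: list[str]) -> str:
    row_a, col_a = divmod(table.index(a), 5)
    row_b, col_b = divmod(table.index(b), 5)
    if row_a == row_b:  # same row: take the letter to the right
        return table[row_a * 5 + (col_a + 1) % 5] + table[row_b * 5 + (col_b + 1) % 5]
    if col_a == col_b:  # same column: take the letter below
        return table[((row_a + 1) % 5) * 5 + col_a] + table[((row_b + 1) % 5) * 5 + col_b]
    return table[row_a * 5 + col_b] + table[row_b * 5 + col_a]  # rectangle rule

table = generate_table("playfair example")
assert table[:5] == ["P", "L", "A", "Y", "F"]
assert encode_pair("H", "I", table) == "BM"  # first digraph of the classic example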
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
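A quick numeric check of the arc-length formula above: a 90-degree arc is a quarter circle, so radius 10 gives 2*pi*10/4 = 5*pi:

from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)  # ~15.70796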
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0 ):
a__ = 2**power
a__ = 0
while n:
a__ , a__ = r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 657 |
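The divmod loop above sums the decimal digits of 2**power (Project Euler 16). An equivalent one-liner over the string representation:

def digit_sum(power: int = 1000) -> int:
    return sum(int(d) for d in str(2**power))

assert digit_sum(15) == 26      # 2**15 = 32768 and 3+2+7+6+8 = 26
assert digit_sum(1000) == 1366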
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, and the remaining multiples of 2 or 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
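A de-obfuscated sketch of the 6k±1 trial-division search above (Project Euler 7); the names are mine, the logic follows the snippet:

from math import isqrt

def is_prime(number: int) -> bool:
    if number < 2:
        return False
    if number < 4:
        return True  # 2 and 3
    if number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def nth_prime(nth: int = 10001) -> int:
    count, number = 0, 1
    while count < nth:
        number += 1
        if is_prime(number):
            count += 1
    return number

assert nth_prime(6) == 13
assert nth_prime(10001) == 104743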
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Tuple ,__snake_case :Any ,__snake_case :Dict ) -> Union[str, Any]:
return F'gaussian_noise_s={seed}_shape={"_".join([str(__snake_case ) for s in shape] )}.npy'
def lowerCamelCase__( self :int ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCamelCase__( self :List[Any] ,__snake_case :List[str]=0 ,__snake_case :Union[str, Any]=(4, 4, 64, 64) ,__snake_case :Optional[int]=False ) -> str:
a__ = jnp.bfloataa if fpaa else jnp.floataa
a__ = jnp.array(load_hf_numpy(self.get_file_format(__snake_case ,__snake_case ) ) ,dtype=__snake_case )
return image
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any]=False ,__snake_case :List[str]="CompVis/stable-diffusion-v1-4" ) -> Optional[int]:
a__ = jnp.bfloataa if fpaa else jnp.floataa
a__ = 'bf16' if fpaa else None
a__ , a__ = FlaxUNetaDConditionModel.from_pretrained(
__snake_case ,subfolder='unet' ,dtype=__snake_case ,revision=__snake_case )
return model, params
def lowerCamelCase__( self :int ,__snake_case :Union[str, Any]=0 ,__snake_case :str=(4, 77, 7_68) ,__snake_case :str=False ) -> Optional[Any]:
a__ = jnp.bfloataa if fpaa else jnp.floataa
a__ = jnp.array(load_hf_numpy(self.get_file_format(__snake_case ,__snake_case ) ) ,dtype=__snake_case )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 10_00, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def lowerCamelCase__( self :Any ,__snake_case :Any ,__snake_case :Dict ,__snake_case :str ) -> Optional[int]:
a__ , a__ = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' ,fpaa=__snake_case )
a__ = self.get_latents(__snake_case ,fpaa=__snake_case )
a__ = self.get_encoder_hidden_states(__snake_case ,fpaa=__snake_case )
a__ = model.apply(
{'params': params} ,__snake_case ,jnp.array(__snake_case ,dtype=jnp.intaa ) ,encoder_hidden_states=__snake_case ,).sample
assert sample.shape == latents.shape
a__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa )
a__ = jnp.array(__snake_case ,dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__snake_case ,__snake_case ,atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 10_00, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :int ,__snake_case :List[Any] ) -> Union[str, Any]:
a__ , a__ = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' ,fpaa=__snake_case )
a__ = self.get_latents(__snake_case ,shape=(4, 4, 96, 96) ,fpaa=__snake_case )
a__ = self.get_encoder_hidden_states(__snake_case ,shape=(4, 77, 10_24) ,fpaa=__snake_case )
a__ = model.apply(
{'params': params} ,__snake_case ,jnp.array(__snake_case ,dtype=jnp.intaa ) ,encoder_hidden_states=__snake_case ,).sample
assert sample.shape == latents.shape
a__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa )
a__ = jnp.array(__snake_case ,dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__snake_case ,__snake_case ,atol=1E-2 )
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
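The tests above exercise a greedy fractional-knapsack routine. A minimal sketch consistent with those tests (error strings are copied verbatim from the assertions, including the upstream grammar; items are taken in order of profit density):

from __future__ import annotations

def calc_profit(profit: list[int], weight: list[int], max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # sort item indices by profit per unit weight, densest first
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        if weight[i] <= capacity:
            capacity -= weight[i]
            total += profit[i]
        else:
            total += profit[i] * capacity / weight[i]  # take a fraction of the item
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210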
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_copies.py
snake_case : Union[str, Any] = '''src/diffusers'''
snake_case : int = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
snake_case : int = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
snake_case : Union[str, Any] = spec.loader.load_module()
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int ):
return line.startswith(__lowerCAmelCase ) or len(__lowerCAmelCase ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , __lowerCAmelCase ) is not None
def __lowercase ( __lowerCAmelCase : Any ):
a__ = object_name.split('.' )
a__ = 0
# First let's find the module where our object lives.
a__ = parts[i]
while i < len(__lowerCAmelCase ) and not os.path.isfile(os.path.join(__lowerCAmelCase , F'{module}.py' ) ):
i += 1
if i < len(__lowerCAmelCase ):
a__ = os.path.join(__lowerCAmelCase , parts[i] )
if i >= len(__lowerCAmelCase ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(__lowerCAmelCase , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ = f.readlines()
# Now let's find the class / func in the code!
a__ = ''
a__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCAmelCase ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCAmelCase ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a__ = line_index
while line_index < len(__lowerCAmelCase ) and _should_continue(lines[line_index] , __lowerCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a__ = lines[start_index:line_index]
return "".join(__lowerCAmelCase )
snake_case : List[Any] = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
snake_case : Tuple = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
snake_case : str = re.compile(r'''<FILL\s+[^>]*>''')
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
a__ = code.split('\n' )
a__ = 0
while idx < len(__lowerCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCAmelCase ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def __lowercase ( __lowerCAmelCase : Any ):
a__ = len(get_indent(__lowerCAmelCase ) ) > 0
if has_indent:
a__ = F'class Bla:\n{code}'
a__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=__lowerCAmelCase )
a__ = black.format_str(__lowerCAmelCase , mode=__lowerCAmelCase )
a__ , a__ = style_docstrings_in_code(__lowerCAmelCase )
return result[len('class Bla:\n' ) :] if has_indent else result
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=False ):
with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ = f.readlines()
a__ = []
a__ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCAmelCase ):
a__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
a__ , a__ , a__ = search.groups()
a__ = find_code_in_diffusers(__lowerCAmelCase )
a__ = get_indent(__lowerCAmelCase )
a__ = line_index + 1 if indent == theoretical_indent else line_index + 2
a__ = theoretical_indent
a__ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
a__ = True
while line_index < len(__lowerCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCAmelCase ):
break
a__ = lines[line_index]
a__ = _should_continue(__lowerCAmelCase , __lowerCAmelCase ) and re.search(F'^{indent}# End copy' , __lowerCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a__ = lines[start_index:line_index]
a__ = ''.join(__lowerCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
a__ = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__lowerCAmelCase ) is None]
a__ = '\n'.join(__lowerCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCAmelCase ) > 0:
a__ = replace_pattern.replace('with' , '' ).split(',' )
a__ = [_re_replace_pattern.search(__lowerCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
a__ , a__ , a__ = pattern.groups()
a__ = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if option.strip() == "all-casing":
a__ = re.sub(obja.lower() , obja.lower() , __lowerCAmelCase )
a__ = re.sub(obja.upper() , obja.upper() , __lowerCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
a__ = blackify(lines[start_index - 1] + theoretical_code )
a__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
a__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
a__ = start_index + 1
if overwrite and len(__lowerCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__lowerCAmelCase )
return diffs
def __lowercase ( __lowerCAmelCase : bool = False ):
a__ = glob.glob(os.path.join(__lowerCAmelCase , '**/*.py' ) , recursive=__lowerCAmelCase )
a__ = []
for filename in all_files:
a__ = is_copy_consistent(__lowerCAmelCase , __lowerCAmelCase )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(__lowerCAmelCase ) > 0:
a__ = '\n'.join(__lowerCAmelCase )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
snake_case : str = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 657 |
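A quick check of the `Copied from` regex defined above: it captures the indentation, the fully qualified diffusers object, and any trailing `A->B` replacement pattern (the module path in the sample comment is made up):

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
match = _re_copy_warning.search("    # Copied from diffusers.models.foo.Bar with Bar->Baz")
assert match.groups() == ("    ", "models.foo.Bar", "with Bar->Baz")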
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
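For intuition, a sketch of the curve behind `get_linear_schedule_with_warmup`, reproducing the expected values in the test above (base LR 10.0, 2 warmup steps, 10 training steps): the LR ramps linearly from 0 to the base rate, then decays linearly back to 0.

def linear_warmup_lr(step: int, base_lr: float, num_warmup_steps: int, num_training_steps: int) -> float:
    if step < num_warmup_steps:
        return base_lr * step / max(1, num_warmup_steps)
    return base_lr * max(
        0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps)
    )

lrs = [round(linear_warmup_lr(s, 10.0, 2, 10), 2) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]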
def __lowercase ( __lowerCAmelCase : int ):
if num <= 0:
raise ValueError('Input must be a positive integer' )
a__ = [True] * (num + 1)
a__ = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCAmelCase ):
a__ = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : int = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 657 |
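A cleaned-up version of the sieve above with readable names; marking starts at p*p because smaller multiples were already crossed out by smaller primes:

def sieve_of_eratosthenes(num: int) -> list:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    is_prime = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if is_prime[p]:
            for multiple in range(p * p, num + 1, p):
                is_prime[multiple] = False
        p += 1
    return [n for n in range(2, num + 1) if is_prime[n]]

assert sieve_of_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]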
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class snake_case_ (unittest.TestCase ):
def __init__( self :Optional[int] ,__snake_case :Optional[int] ,__snake_case :Optional[Any]=7 ,__snake_case :Any=3 ,__snake_case :int=30 ,__snake_case :Union[str, Any]=4_00 ,__snake_case :str=True ,__snake_case :List[str]=None ,__snake_case :List[Any]=True ,__snake_case :Dict=[0.5, 0.5, 0.5] ,__snake_case :Optional[Any]=[0.5, 0.5, 0.5] ,__snake_case :int=True ,__snake_case :int=1 / 2_55 ,__snake_case :List[str]=True ,) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = do_rescale
a__ = rescale_factor
a__ = do_pad
def lowerCamelCase__( self :str ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase__( self :Dict ,__snake_case :Optional[Any] ,__snake_case :Tuple=False ) -> Tuple:
if not batched:
a__ = image_inputs[0]
if isinstance(__snake_case ,Image.Image ):
a__ , a__ = image.size
else:
a__ , a__ = image.shape[1], image.shape[2]
if w < h:
a__ = int(self.size['shortest_edge'] * h / w )
a__ = self.size['shortest_edge']
elif w > h:
a__ = self.size['shortest_edge']
a__ = int(self.size['shortest_edge'] * w / h )
else:
a__ = self.size['shortest_edge']
a__ = self.size['shortest_edge']
else:
a__ = []
for image in image_inputs:
a__ , a__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a__ = max(__snake_case ,key=lambda __snake_case : item[0] )[0]
a__ = max(__snake_case ,key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Any = DeformableDetrImageProcessor if is_vision_available() else None
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
a__ = DeformableDetrImageProcessingTester(self )
@property
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__( self :List[str] ) -> Dict:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case ,'image_mean' ) )
self.assertTrue(hasattr(__snake_case ,'image_std' ) )
self.assertTrue(hasattr(__snake_case ,'do_normalize' ) )
self.assertTrue(hasattr(__snake_case ,'do_resize' ) )
self.assertTrue(hasattr(__snake_case ,'do_rescale' ) )
self.assertTrue(hasattr(__snake_case ,'do_pad' ) )
self.assertTrue(hasattr(__snake_case ,'size' ) )
def lowerCamelCase__( self :int ) -> List[str]:
a__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad ,__snake_case )
a__ = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size ,{'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad ,__snake_case )
def lowerCamelCase__( self :str ) -> List[Any]:
pass
def lowerCamelCase__( self :Tuple ) -> Dict:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :int ) -> str:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :Optional[Any] ) -> str:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
# prepare image and target
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
a__ = DeformableDetrImageProcessor()
a__ = image_processing(images=__snake_case ,annotations=__snake_case ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,__snake_case )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__snake_case ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__snake_case ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__snake_case )
a__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__snake_case ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__snake_case ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__snake_case ) )
# verify class_labels
a__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__snake_case ) )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__snake_case ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__snake_case ) )
@slow
def lowerCamelCase__( self :str ) -> List[str]:
# prepare image, target and masks_path
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
a__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a__ = DeformableDetrImageProcessor(format='coco_panoptic' )
a__ = image_processing(images=__snake_case ,annotations=__snake_case ,masks_path=__snake_case ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,__snake_case )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__snake_case ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__snake_case ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__snake_case )
a__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__snake_case ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__snake_case ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__snake_case ) )
# verify class_labels
a__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__snake_case ) )
# verify masks
a__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__snake_case )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__snake_case ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__snake_case ) )
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Round the height and width down to the nearest lower multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
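# Minimal self-contained sketch of the size_divisor rounding used in resize()
# above (assumption: both dimensions are floored to a multiple of
# `size_divisor`; `_round_down_to_multiple` is an illustration-only name).
def _round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple[int, int]:
    # Floor each dimension to the nearest lower multiple of size_divisor.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert _round_down_to_multiple(480, 500, 32) == (480, 480)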
| 657 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
snake_case : int = datasets.utils.logging.get_logger(__name__)
snake_case : List[Any] = ['''names''', '''prefix''']
snake_case : List[Any] = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
snake_case : List[Any] = ['''encoding_errors''', '''on_bad_lines''']
snake_case : str = ['''date_format''']
@dataclass
class snake_case_ (datasets.BuilderConfig ):
UpperCAmelCase__ : str = ","
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[Union[int, List[int], str]] = "infer"
UpperCAmelCase__ : Optional[List[str]] = None
UpperCAmelCase__ : Optional[List[str]] = None
UpperCAmelCase__ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCAmelCase__ : Optional[Union[List[int], List[str]]] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCAmelCase__ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCAmelCase__ : Optional[list] = None
UpperCAmelCase__ : Optional[list] = None
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : Optional[Union[int, List[int]]] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Union[str, List[str]]] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : str = "."
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : str = '"'
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : int = 1_0_0_0_0
UpperCAmelCase__ : Optional[datasets.Features] = None
UpperCAmelCase__ : Optional[str] = "strict"
UpperCAmelCase__ : Literal["error", "warn", "skip"] = "error"
UpperCAmelCase__ : Optional[str] = None
def lowerCamelCase__( self :str ) -> Optional[int]:
if self.delimiter is not None:
a__ = self.delimiter
if self.column_names is not None:
a__ = self.column_names
@property
def lowerCamelCase__( self :Tuple ) -> str:
a__ = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value,
# and deprecated ones are also skipped when they still equal their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,__snake_case ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove arguments introduced in pandas 2.0 when running an older pandas
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove arguments introduced in pandas 1.3 when running an older pandas
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
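# Minimal sketch of the "skip kwargs that still equal their default" pattern
# used in the property above (assumption: values are compared against a
# freshly constructed config, as in getattr(CsvConfig(), ...); _DemoConfig and
# _non_default_kwargs are illustration-only names).
from dataclasses import fields

@dataclass
class _DemoConfig:
    sep: str = ","
    prefix: str = ""

def _non_default_kwargs(cfg: _DemoConfig) -> dict:
    default = _DemoConfig()
    # Keep only the values that differ from the dataclass defaults.
    return {
        f.name: getattr(cfg, f.name)
        for f in fields(cfg)
        if getattr(cfg, f.name) != getattr(default, f.name)
    }

assert _non_default_kwargs(_DemoConfig(sep=";")) == {"sep": ";"}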
class snake_case_ (datasets.ArrowBasedBuilder ):
UpperCAmelCase__ : Optional[Any] = CsvConfig
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__( self :str ,__snake_case :Tuple ) -> int:
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
a__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__snake_case ,(str, list, tuple) ):
a__ = data_files
if isinstance(__snake_case ,__snake_case ):
a__ = [files]
a__ = [dl_manager.iter_files(__snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
a__ = []
for split_name, files in data_files.items():
if isinstance(__snake_case ,__snake_case ):
a__ = [files]
a__ = [dl_manager.iter_files(__snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=__snake_case ,gen_kwargs={'files': files} ) )
return splits
def lowerCamelCase__( self :int ,__snake_case :pa.Table ) -> pa.Table:
if self.config.features is not None:
a__ = self.config.features.arrow_schema
if all(not require_storage_cast(__snake_case ) for feature in self.config.features.values() ):
# cheaper cast
a__ = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=__snake_case )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
a__ = table_cast(__snake_case ,__snake_case )
return pa_table
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[Any] ) -> Dict:
a__ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a__ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__snake_case ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
a__ = pd.read_csv(__snake_case ,iterator=__snake_case ,dtype=__snake_case ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__snake_case ):
a__ = pa.Table.from_pandas(__snake_case )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__snake_case )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__snake_case )}: {e}' )
raise
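# Hedged sketch of the chunked-read loop above (assumption: pandas's
# iterator=True/chunksize interface yields DataFrames that convert cleanly via
# pa.Table.from_pandas; an in-memory CSV stands in for real data files).
import io

_reader = pd.read_csv(io.StringIO("a,b\n1,x\n2,y\n3,z\n"), iterator=True, chunksize=2)
_tables = [pa.Table.from_pandas(df) for df in _reader]
assert [t.num_rows for t in _tables] == [2, 1]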
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(row_index + 1 , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
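# Standalone sanity sketch of the recurrence both generators above implement
# (assumption: standard Pascal's triangle, where each interior element is the
# sum of the two elements above it; kept independent of the functions above,
# and `_next_pascal_row` is an illustration-only name).
def _next_pascal_row(prev: list[int]) -> list[int]:
    return [1] + [prev[i - 1] + prev[i] for i in range(1, len(prev))] + [1]

row = [1]
for _ in range(4):
    row = _next_pascal_row(row)
assert row == [1, 4, 6, 4, 1]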
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[float] ):
if len(__lowerCAmelCase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
a__ = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
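# Hedged usage sketch (assumption: the function above encodes the polygon
# inequality -- the longest side must be strictly shorter than the sum of the
# remaining sides; written standalone since the name above is a placeholder).
def _can_form_polygon(sides: list[float]) -> bool:
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert _can_form_polygon([3, 4, 5]) is True
assert _can_form_polygon([1, 2, 10]) is False  # 10 >= 1 + 2, degenerate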
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
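# Sanity sketch of the outcome-index trick used in generate_random_hand above:
# (a >= b) + (a > b) evaluates to 0 when a < b, 1 when a == b and 2 when a > b,
# which indexes ['Loss', 'Tie', 'Win'] directly.
assert [["Loss", "Tie", "Win"][(a >= b) + (a > b)] for a, b in [(0, 1), (1, 1), (2, 1)]] == ["Loss", "Tie", "Win"]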
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Repeated calls to _is_five_high_straight should keep returning True and
# must not mutate the stored card values after the first call.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case : Tuple = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
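# Sanity sketch for the closed form above: consecutive hexagonal numbers
# n * (2n - 1) differ by 4n + 1, so the sequence can be rebuilt incrementally
# and compared against the formula (assumption: zero-indexed, as above).
value, incremental = 0, []
for n in range(6):
    incremental.append(value)
    value += 4 * n + 1
assert incremental == [n * (2 * n - 1) for n in range(6)]  # [0, 1, 6, 15, 28, 45]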
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case : List[Any] = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
snake_case : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# Profit per unit weight (profit/weight ratio) for each item.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Create a sorted copy of the ratios (ascending order)
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop until the carried weight reaches max_weight or every item has been considered
while limit <= max_weight and i < length:
# pick the item with the greatest remaining profit/weight ratio
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
# mark this ratio as consumed so .index() does not pick it again
profit_by_weight[index] = -1
# check whether the whole item fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
# the whole item fits: add its full profit (fraction taken is
# weight[index]/weight[index] == 1)
gain += 1 * profit[index]
else:
# The item does not fit entirely, so take only the remaining capacity
# and add the proportional profit: (max_weight - limit) / weight[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
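# Worked example sketch of greedy fractional knapsack, the algorithm the
# function above implements (written standalone so it does not depend on the
# placeholder argument names; `_fractional_knapsack` is an illustration-only
# name). Items are taken in decreasing profit/weight order; the last item may
# be taken fractionally.
def _fractional_knapsack(profit: list[int], weight: list[int], capacity: float) -> float:
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain = 0.0
    for p, w in items:
        take = min(w, capacity)  # whole item if it fits, else the remainder
        gain += p * take / w
        capacity -= take
        if capacity <= 0:
            break
    return gain

assert _fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0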
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def lowerCamelCase__( self :Dict ) -> List[Any]:
with self.assertRaises(__snake_case ):
a__ = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def lowerCamelCase__( self :List[str] ) -> List[str]:
with self.assertRaises(__snake_case ):
a__ = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def lowerCamelCase__( self :List[Any] ) -> Any:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a__ = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
a__ = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def lowerCamelCase__( self :List[str] ) -> str:
a__ = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def lowerCamelCase__( self :Tuple ) -> List[str]:
a__ = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a__ = pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def lowerCamelCase__( self :Dict ) -> Tuple:
a__ = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def lowerCamelCase__( self :Optional[int] ) -> str:
import PIL.Image
a__ = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=__snake_case ) as mock_cast_to_python_objects:
a__ = pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] ,type=Image() ) )
a__ , a__ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,__snake_case )
self.assertFalse(kwargs['optimize_list_casting'] )
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
a__ = pa.BufferReader(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , pa.Buffer ) else pa.memory_map(__lowerCAmelCase )
a__ = pa.ipc.open_stream(__lowerCAmelCase )
a__ = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
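# Minimal end-to-end sketch of the write-then-read round trip the tests below
# exercise (assumption: plain pyarrow IPC APIs, not the datasets.ArrowWriter
# under test; `_roundtrip_demo` is an illustration-only name).
def _roundtrip_demo() -> dict:
    sink = pa.BufferOutputStream()
    table = pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]})
    with pa.ipc.new_stream(sink, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    reader = pa.ipc.open_stream(pa.BufferReader(sink.getvalue()))
    return reader.read_all().to_pydict()

assert _roundtrip_demo() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}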
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ):
a__ = pa.BufferOutputStream()
a__ = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowercase ( ):
a__ = pa.BufferOutputStream()
a__ = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=__lowerCAmelCase , features=__lowerCAmelCase ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
a__ = pa.BufferReader(output.getvalue() )
a__ = pa.ipc.open_stream(__lowerCAmelCase )
a__ = f.read_all()
a__ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__lowerCAmelCase )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
def __lowercase ( __lowerCAmelCase : Dict ):
a__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt='split_name' , check_duplicates=__lowerCAmelCase , ) as writer:
with pytest.raises(__lowerCAmelCase ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
a__ , a__ = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 1_0] )
def __lowercase ( __lowerCAmelCase : List[Any] ):
a__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt='split_name' , check_duplicates=__lowerCAmelCase , ) as writer:
with pytest.raises(__lowerCAmelCase ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=1_0 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=1_0 )
a__ , a__ = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 1_0] )
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
a__ = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt='split_name' , check_duplicates=__lowerCAmelCase , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
a__ = pa.BufferOutputStream()
a__ = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
a__ = pa.BufferOutputStream()
a__ = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 1_0] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
a__ = pa.BufferOutputStream()
a__ = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowercase ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = {'col_1': pa.string(), 'col_2': pa.intaa()}
a__ = os.path.join(__lowerCAmelCase , 'test.arrow' )
with ArrowWriter(path=__lowerCAmelCase , schema=pa.schema(__lowerCAmelCase ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(__lowerCAmelCase , 1 )
def __lowercase ( __lowerCAmelCase : Tuple ):
if pa.types.is_list(__lowerCAmelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ):
if isinstance(lst[0] , __lowerCAmelCase ):
change_first_primitive_element_in_list(lst[0] , __lowerCAmelCase )
else:
a__ = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ):
a__ = pa.array(TypedSequence(__lowerCAmelCase , optimized_int_type=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : str ):
# in range
a__ = pa.array(OptimizedTypedSequence(__lowerCAmelCase , col=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
a__ = copy.deepcopy(__lowerCAmelCase )
a__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__lowerCAmelCase , __lowerCAmelCase )
a__ = pa.array(OptimizedTypedSequence(__lowerCAmelCase , col=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
a__ = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=__lowerCAmelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
a__ = 'mock://dataset-train.arrow'
with ArrowWriter(path=__lowerCAmelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__lowerCAmelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__lowerCAmelCase )
def __lowercase ( ):
a__ = pa.BufferOutputStream()
with ParquetWriter(stream=__lowerCAmelCase ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
a__ , a__ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
a__ = pa.BufferReader(output.getvalue() )
a__ = pq.read_table(__lowerCAmelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
import PIL.Image
a__ = str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowerCAmelCase , format='png' )
a__ = pa.BufferOutputStream()
with ParquetWriter(
stream=__lowerCAmelCase , features=Features({'image': Image()} ) , embed_local_files=__lowerCAmelCase ) as writer:
writer.write({'image': image_path} )
writer.finalize()
a__ = pa.BufferReader(output.getvalue() )
a__ = pq.read_table(__lowerCAmelCase )
a__ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , __lowerCAmelCase )
with open(__lowerCAmelCase , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowercase ( ):
a__ = pa.schema([pa.field('col_1' , pa.string() , nullable=__lowerCAmelCase )] )
a__ = pa.BufferOutputStream()
with ArrowWriter(stream=__lowerCAmelCase ) as writer:
writer._build_writer(inferred_schema=__lowerCAmelCase )
assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
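# Toy sketch of the lazy-import pattern the module above relies on
# (assumption: the real _LazyModule resolves names on first attribute access;
# `_ToyLazyModule` is an illustration-only class, not the transformers API).
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule only when an attribute is first requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)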
| 657 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : complex , __lowerCAmelCase : str = "x" , __lowerCAmelCase : float = 1_0**-1_0 , __lowerCAmelCase : int = 1 , ):
a__ = symbols(__lowerCAmelCase )
a__ = lambdify(__lowerCAmelCase , __lowerCAmelCase )
a__ = lambdify(__lowerCAmelCase , diff(__lowerCAmelCase , __lowerCAmelCase ) )
a__ = starting_point
while True:
if diff_function(__lowerCAmelCase ) != 0:
a__ = prev_guess - multiplicity * func(__lowerCAmelCase ) / diff_function(
__lowerCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
a__ = next_guess
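# One hand-computed Newton step as a sanity sketch (assumption: f(x) = x**2 - 2
# starting from x0 = 1.5): x1 = x0 - f(x0) / f'(x0) = 1.5 - 0.25 / 3 = 17/12,
# already close to sqrt(2) ~ 1.41421.
x0 = 1.5
x1 = x0 - (x0**2 - 2) / (2 * x0)
assert abs(x1 - 17 / 12) < 1e-12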
# Example runs
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0 ):
a__ = -1
a__ = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
a__ = (n * n - 2 * a * n) // (2 * n - 2 * a)
a__ = n - a - b
if c * c == (a * a + b * b):
a__ = a * b * c
if candidate >= product:
a__ = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
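# A readably-named sketch of the routine above (the variable names here are
# assumptions, since the dump collapses every local to `a__`). For n = 1000 the
# only triplet with a + b + c = n and a^2 + b^2 = c^2 is (200, 375, 425), so the
# maximal product is 31875000.
def special_pythagorean_product(n: int = 1_000) -> int:
    product = -1
    for a in range(1, n // 3):
        # from a + b + c = n and a^2 + b^2 = c^2 it follows that
        # b = (n^2 - 2an) / (2n - 2a); non-integer cases fail the c^2 check below
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == a * a + b * b:
            product = max(product, a * b * c)
    return product


assert special_pythagorean_product(1_000) == 31_875_000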
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
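# The `ets` blending in `step` above applies the classic Adams-Bashforth multistep
# coefficients (orders 1-4: 1; (3,-1)/2; (23,-16,5)/12; (55,-59,37,-9)/24). A minimal
# sketch of the same scheme on the toy ODE y' = -y, whose exact solution is e^(-t):
import math


def ab4_integrate(y0: float = 1.0, step: float = 0.01, num_steps: int = 100) -> float:
    y, ets = y0, []
    for _ in range(num_steps):
        ets.append(-y)  # f(t, y) = -y
        if len(ets) == 1:
            slope = ets[-1]
        elif len(ets) == 2:
            slope = (3 * ets[-1] - ets[-2]) / 2
        elif len(ets) == 3:
            slope = (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
        else:
            slope = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
        y += step * slope
    return y


# the warm-up steps use the lower-order rules, so accuracy is ~1e-5, not full 4th order
assert abs(ab4_integrate() - math.exp(-1.0)) < 1e-3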
| 657 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ):
# prepare kernel
# the kernel size has to be odd
if (ksize % 2) == 0:
a__ = ksize + 1
a__ = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__lowerCAmelCase ):
for x in range(__lowerCAmelCase ):
# distance from center
a__ = x - ksize // 2
a__ = y - ksize // 2
# convert degrees to radians
a__ = theta / 1_8_0 * np.pi
a__ = np.cos(_theta )
a__ = np.sin(_theta )
# get kernel x
a__ = cos_theta * px + sin_theta * py
# get kernel y
a__ = -sin_theta * px + cos_theta * py
# fill kernel
a__ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
snake_case : Dict = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
snake_case : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
snake_case : List[str] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
snake_case : Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
snake_case : List[str] = out / out.max() * 2_55
snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
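# A NumPy-only cross-check of the kernel construction above (OpenCV is needed only
# to read and filter the image). Names and the default dtype here are assumptions:
import numpy as np


def gabor_kernel(ksize, sigma, theta, lambd, gamma, psi):
    if ksize % 2 == 0:
        ksize += 1
    kernel = np.zeros((ksize, ksize))
    theta_rad = theta / 180 * np.pi
    cos_t, sin_t = np.cos(theta_rad), np.sin(theta_rad)
    for y in range(ksize):
        for x in range(ksize):
            px, py = x - ksize // 2, y - ksize // 2
            rot_x = cos_t * px + sin_t * py  # rotate into the filter frame
            rot_y = -sin_t * px + cos_t * py
            kernel[y, x] = np.exp(-(rot_x**2 + gamma**2 * rot_y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * rot_x / lambd + psi
            )
    return kernel


kernel = gabor_kernel(10, 8, 0, 10, 0, 0)
assert kernel.shape == (11, 11)  # even sizes are bumped to the next odd size
assert kernel[5, 5] == 1.0  # envelope times cosine peaks at the centre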
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
snake_case : List[str] = logging.getLogger(__name__)
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ,__snake_case :int ,__snake_case :List[str] ,__snake_case :List[Any] ,__snake_case :Dict=None ) -> Union[str, Any]:
super().__init__(
__snake_case ,question_encoder_tokenizer=__snake_case ,generator_tokenizer=__snake_case ,index=__snake_case ,init_retrieval=__snake_case ,)
a__ = None
def lowerCamelCase__( self :List[str] ,__snake_case :int ) -> Optional[Any]:
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
a__ = self._infer_socket_ifname()
# avoid clash with the NCCL port
a__ = str(distributed_port + 1 )
a__ = dist.new_group(ranks=__snake_case ,backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase__( self :List[Any] ,__snake_case :Tuple ,__snake_case :List[str] ,__snake_case :int=torch.floataa ) -> int:
a__ = torch.empty(__snake_case ,dtype=__snake_case )
dist.scatter(__snake_case ,src=0 ,scatter_list=__snake_case ,group=self.process_group )
return target_tensor
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
a__ = next((addr for addr in addrs if addr.startswith('e' )) ,__snake_case )
return ifname
def lowerCamelCase__( self :int ,__snake_case :np.ndarray ,__snake_case :int ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
a__ , a__ = self._main_retrieve(__snake_case ,__snake_case )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__snake_case )
# distributed training
a__ = dist.get_world_size(group=self.process_group )
# gather logic
a__ = None
if self._is_main():
a__ = [torch.empty(question_hidden_states.shape ,dtype=torch.floataa ) for _ in range(__snake_case )]
dist.gather(torch.tensor(__snake_case ) ,dst=0 ,gather_list=__snake_case ,group=self.process_group )
# scatter logic
a__ = question_hidden_states.shape[0]
a__ = []
a__ = []
if self._is_main():
assert len(__snake_case ) == world_size
a__ , a__ = self._main_retrieve(torch.cat(__snake_case ).numpy() ,__snake_case )
a__ , a__ = torch.tensor(__snake_case ), torch.tensor(__snake_case )
a__ = self._chunk_tensor(__snake_case ,__snake_case )
a__ = self._chunk_tensor(__snake_case ,__snake_case )
a__ = self._scattered(__snake_case ,[n_queries, n_docs] ,target_type=torch.intaa )
a__ = self._scattered(__snake_case ,[n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__snake_case )
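# The retrieve() round-trip above is: every worker gathers its query vectors to
# rank 0, rank 0 searches the index, and the results are scattered back. A minimal
# single-process sketch of that gather/scatter pattern on the gloo backend (the
# init-file path is a throwaway placeholder):
import tempfile

import torch
import torch.distributed as dist


def gather_scatter_roundtrip() -> bool:
    dist.init_process_group(
        "gloo", init_method=f"file://{tempfile.mkdtemp()}/sync", rank=0, world_size=1
    )
    queries = torch.randn(2, 4)
    gathered = [torch.empty(2, 4)]
    dist.gather(queries, dst=0, gather_list=gathered)  # workers -> rank 0
    results = torch.empty(2, 4)
    dist.scatter(results, src=0, scatter_list=[gathered[0]])  # rank 0 -> workers
    dist.destroy_process_group()
    return torch.equal(results, queries)


if __name__ == "__main__":
    assert gather_scatter_roundtrip()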
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
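# Example invocation (the script file name and output path are illustrative):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers
#
# Omitting --tokenizer_name converts every class in SLOW_TO_FAST_CONVERTERS, and
# omitting --checkpoint_name converts every canonical checkpoint of each class.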
| 657 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __lowercase ( __lowerCAmelCase : str ):
return "".join(sorted(__lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : str ):
return word_by_signature[signature(__lowerCAmelCase )]
snake_case : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
snake_case : List[str] = sorted({word.strip().lower() for word in data.splitlines()})
snake_case : str = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
snake_case : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
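# A self-contained illustration of the signature trick above: two words are
# anagrams exactly when their sorted letters match, so the sorted string works
# as a grouping key (the word list here is made up for the demo):
import collections

demo_words = ['eat', 'tea', 'ate', 'tan', 'nat', 'bat']
groups = collections.defaultdict(list)
for word in demo_words:
    groups[''.join(sorted(word))].append(word)

assert groups['aet'] == ['eat', 'tea', 'ate']
assert groups['ant'] == ['tan', 'nat']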
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
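# A brute-force cross-check of the closed-form counting above: a square lamina is
# an outer x outer square with a centred hole of the same parity, using
# outer^2 - inner^2 tiles. Project Euler 173 states that up to one hundred tiles
# allow forty-one different laminae, which this reproduces:
def laminae_brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * (outer - 1) <= limit:  # the thinnest ring uses outer^2 - (outer-2)^2 tiles
        inner = outer - 2
        while inner >= 1 and outer**2 - inner**2 <= limit:
            count += 1
            inner -= 2
        outer += 1
    return count


assert laminae_brute_force(100) == 41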
| 657 | 1 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class snake_case_ :
# setable values
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[jnp.ndarray] = None
UpperCAmelCase__ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def lowerCamelCase__( cls :Optional[int] ) -> Optional[Any]:
return cls()
@dataclass
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : jnp.ndarray
UpperCAmelCase__ : jnp.ndarray
UpperCAmelCase__ : KarrasVeSchedulerState
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
@property
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
return True
@register_to_config
def __init__( self :Tuple ,__snake_case :float = 0.02 ,__snake_case :float = 1_00 ,__snake_case :float = 1.0_07 ,__snake_case :float = 80 ,__snake_case :float = 0.05 ,__snake_case :float = 50 ,) -> str:
pass
def lowerCamelCase__( self :str ) -> List[str]:
return KarrasVeSchedulerState.create()
def lowerCamelCase__( self :Tuple ,__snake_case :KarrasVeSchedulerState ,__snake_case :int ,__snake_case :Tuple = () ) -> KarrasVeSchedulerState:
a__ = jnp.arange(0 ,__snake_case )[::-1].copy()
a__ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__snake_case ,schedule=jnp.array(__snake_case ,dtype=jnp.floataa ) ,timesteps=__snake_case ,)
def lowerCamelCase__( self :Tuple ,__snake_case :KarrasVeSchedulerState ,__snake_case :jnp.ndarray ,__snake_case :float ,__snake_case :random.KeyArray ,) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
a__ = min(self.config.s_churn / state.num_inference_steps ,2**0.5 - 1 )
else:
a__ = 0
# sample eps ~ N(0, S_noise^2 * I)
a__ = random.split(__snake_case ,num=1 )
a__ = self.config.s_noise * random.normal(key=__snake_case ,shape=sample.shape )
a__ = sigma + gamma * sigma
a__ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase__( self :Dict ,__snake_case :KarrasVeSchedulerState ,__snake_case :jnp.ndarray ,__snake_case :float ,__snake_case :float ,__snake_case :jnp.ndarray ,__snake_case :bool = True ,) -> Union[FlaxKarrasVeOutput, Tuple]:
a__ = sample_hat + sigma_hat * model_output
a__ = (sample_hat - pred_original_sample) / sigma_hat
a__ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__snake_case ,derivative=__snake_case ,state=__snake_case )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :KarrasVeSchedulerState ,__snake_case :jnp.ndarray ,__snake_case :float ,__snake_case :float ,__snake_case :jnp.ndarray ,__snake_case :jnp.ndarray ,__snake_case :jnp.ndarray ,__snake_case :bool = True ,) -> Union[FlaxKarrasVeOutput, Tuple]:
a__ = sample_prev + sigma_prev * model_output
a__ = (sample_prev - pred_original_sample) / sigma_prev
a__ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__snake_case ,derivative=__snake_case ,state=__snake_case )
def lowerCamelCase__( self :Optional[int] ,__snake_case :KarrasVeSchedulerState ,__snake_case :int ,__snake_case :str ,__snake_case :List[str] ) -> Optional[int]:
raise NotImplementedError()
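# The schedule built in set_timesteps above interpolates geometrically between
# sigma_max^2 and sigma_min^2. A quick NumPy sketch with this scheduler's defaults
# (sigma_min=0.02, sigma_max=100):
import numpy as np


def karras_ve_schedule(sigma_min=0.02, sigma_max=100.0, num_steps=50):
    i = np.arange(num_steps)
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))


schedule = karras_ve_schedule()
assert np.isclose(schedule[0], 100.0**2) and np.isclose(schedule[-1], 0.02**2)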
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding.' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute the usual loss via the model
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If the PAD token is not defined, at least the EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
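# The optimizer setup above splits parameters into decay / no-decay groups by
# name. A minimal sketch of that pattern on a toy module (the module and the
# hyper-parameter values here are illustrative):
import torch
from torch import nn


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)


model = ToyModel()
no_decay = ['bias', 'LayerNorm.weight']
param_groups = [
    {
        'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01,
    },
    {
        'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0,
    },
]
optimizer = torch.optim.AdamW(param_groups, lr=1e-4)
assert len(param_groups[1]['params']) == 3  # dense.bias, LayerNorm.weight, LayerNorm.bias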
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : int = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Any = '''mgp-str'''
def __init__( self :List[Any] ,__snake_case :str=[32, 1_28] ,__snake_case :int=4 ,__snake_case :Any=3 ,__snake_case :int=27 ,__snake_case :str=38 ,__snake_case :List[Any]=5_02_57 ,__snake_case :str=3_05_22 ,__snake_case :List[str]=7_68 ,__snake_case :Dict=12 ,__snake_case :Any=12 ,__snake_case :List[Any]=4.0 ,__snake_case :Optional[Any]=True ,__snake_case :Optional[Any]=False ,__snake_case :List[str]=1E-5 ,__snake_case :Optional[Any]=0.0 ,__snake_case :List[str]=0.0 ,__snake_case :int=0.0 ,__snake_case :List[str]=False ,__snake_case :int=0.02 ,**__snake_case :List[Any] ,) -> Any:
super().__init__(**__snake_case )
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = max_token_length
a__ = num_character_labels
a__ = num_bpe_labels
a__ = num_wordpiece_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = mlp_ratio
a__ = distilled
a__ = layer_norm_eps
a__ = drop_rate
a__ = qkv_bias
a__ = attn_drop_rate
a__ = drop_path_rate
a__ = output_aa_attentions
a__ = initializer_range
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
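# The repo-id heuristic above treats anything containing whitespace as a raw
# prompt and anything without it as a Hub dataset id. A quick check of that regex:
import re

assert re.search('\\s', 'huggingface-tools/default-prompts') is None  # repo id
assert re.search('\\s', 'Translate this into French.') is not None  # raw prompt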
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : Dict = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = '''transfo-xl'''
UpperCAmelCase__ : Dict = ['''mems''']
UpperCAmelCase__ : Union[str, Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Optional[Any] ,__snake_case :Any=26_77_35 ,__snake_case :Optional[int]=[2_00_00, 4_00_00, 20_00_00] ,__snake_case :Tuple=10_24 ,__snake_case :Optional[Any]=10_24 ,__snake_case :Optional[Any]=16 ,__snake_case :Optional[int]=64 ,__snake_case :str=40_96 ,__snake_case :Optional[int]=4 ,__snake_case :Optional[Any]=False ,__snake_case :int=18 ,__snake_case :List[Any]=16_00 ,__snake_case :Optional[Any]=10_00 ,__snake_case :Optional[Any]=True ,__snake_case :Dict=True ,__snake_case :Optional[int]=0 ,__snake_case :List[Any]=-1 ,__snake_case :Any=True ,__snake_case :int=0.1 ,__snake_case :Any=0.0 ,__snake_case :Optional[Any]=True ,__snake_case :Union[str, Any]="normal" ,__snake_case :Any=0.01 ,__snake_case :int=0.01 ,__snake_case :str=0.02 ,__snake_case :Any=1E-5 ,__snake_case :Any=0 ,**__snake_case :Any ,) -> List[str]:
a__ = vocab_size
a__ = []
self.cutoffs.extend(__snake_case )
if proj_share_all_but_first:
a__ = [False] + [True] * len(self.cutoffs )
else:
a__ = [False] + [False] * len(self.cutoffs )
a__ = d_model
a__ = d_embed
a__ = d_head
a__ = d_inner
a__ = div_val
a__ = pre_lnorm
a__ = n_layer
a__ = n_head
a__ = mem_len
a__ = same_length
a__ = attn_type
a__ = clamp_len
a__ = sample_softmax
a__ = adaptive
a__ = dropout
a__ = dropatt
a__ = untie_r
a__ = init
a__ = init_range
a__ = proj_init_std
a__ = init_std
a__ = layer_norm_epsilon
super().__init__(eos_token_id=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Any ) -> Tuple:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
getcontext().prec = precision  # set Decimal precision, otherwise the division below loses digits
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
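# The routine above implements the Chudnovsky series,
#   1/pi = 12 * sum_k (-1)^k (6k)! (13591409 + 545140134 k) / ((3k)! (k!)^3 640320^(3k + 3/2)),
# where each term contributes roughly 14 digits. A readably-named sketch (the
# names are assumptions, since the dump collapses every local to `a__`):
from decimal import Decimal, getcontext
from math import ceil, factorial


def chudnovsky_pi(precision: int) -> str:
    getcontext().prec = precision
    iterations = ceil(precision / 14)  # each term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(13591409)
    for k in range(1, iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


assert chudnovsky_pi(12).startswith('3.141592653')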
| 657 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case : Optional[Any] = data_utils.TransfoXLTokenizer
snake_case : Dict = data_utils.TransfoXLCorpus
snake_case : Optional[Any] = data_utils
snake_case : str = data_utils
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__lowerCAmelCase , 'rb' ) as fp:
a__ = pickle.load(__lowerCAmelCase , encoding='latin1' )
# Save vocabulary and dataset cache as dictionaries (should age better than pickles)
a__ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
a__ = corpus.vocab.__dict__
torch.save(__lowerCAmelCase , __lowerCAmelCase )
a__ = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , __lowerCAmelCase )
a__ = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
a__ = os.path.abspath(__lowerCAmelCase )
a__ = os.path.abspath(__lowerCAmelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
a__ = TransfoXLConfig()
else:
a__ = TransfoXLConfig.from_json_file(__lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
a__ = TransfoXLLMHeadModel(__lowerCAmelCase )
a__ = load_tf_weights_in_transfo_xl(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}' )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(__lowerCAmelCase )}' )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained Transformer-XL model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
snake_case : Any = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
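# Example invocations (script and file names are illustrative):
#
#   # convert a pre-processed corpus pickle into vocabulary + dataset caches
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --transfo_xl_dataset_file ./corpus.pkl
#
#   # convert a TensorFlow checkpoint together with its config
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json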
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
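# The loop above is the classic unbounded-coin dynamic programme. With readable
# names (assumed; the dump collapses them) and a small worked case: there are 4
# ways to make 5p from {1, 2, 5}: 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
def coin_ways(pence: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0 pence: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]


assert coin_ways(5, coins=(1, 2, 5)) == 4
assert coin_ways(200) == 73_682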
| 657 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=1E-1_2 ):
a__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCAmelCase , axis=1 ) , a_min=__lowerCAmelCase ) ).T
a__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__lowerCAmelCase , axis=1 ) , a_min=__lowerCAmelCase ) ).T
return jnp.matmul(__lowerCAmelCase , norm_emb_a.T )
class snake_case_ (nn.Module ):
UpperCAmelCase__ : CLIPConfig
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCamelCase__( self :str ) -> Optional[int]:
a__ = FlaxCLIPVisionModule(self.config.vision_config )
a__ = nn.Dense(self.config.projection_dim ,use_bias=__snake_case ,dtype=self.dtype )
a__ = self.param('concept_embeds' ,jax.nn.initializers.ones ,(17, self.config.projection_dim) )
a__ = self.param(
'special_care_embeds' ,jax.nn.initializers.ones ,(3, self.config.projection_dim) )
a__ = self.param('concept_embeds_weights' ,jax.nn.initializers.ones ,(17,) )
a__ = self.param('special_care_embeds_weights' ,jax.nn.initializers.ones ,(3,) )
def __call__( self :Optional[Any] ,__snake_case :List[str] ) -> Dict:
a__ = self.vision_model(__snake_case )[1]
a__ = self.visual_projection(__snake_case )
a__ = jax_cosine_distance(__snake_case ,self.special_care_embeds )
a__ = jax_cosine_distance(__snake_case ,self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
a__ = 0.0
a__ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
a__ = jnp.round(__snake_case ,3 )
a__ = jnp.any(special_scores > 0 ,axis=1 ,keepdims=__snake_case )
# Use a lower threshold if an image has any special care concept
a__ = is_special_care * 0.01
a__ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
a__ = jnp.round(__snake_case ,3 )
a__ = jnp.any(concept_scores > 0 ,axis=1 )
return has_nsfw_concepts
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = CLIPConfig
UpperCAmelCase__ : Tuple = '''clip_input'''
UpperCAmelCase__ : List[Any] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self :List[Any] ,__snake_case :CLIPConfig ,__snake_case :Optional[Tuple] = None ,__snake_case :int = 0 ,__snake_case :jnp.dtype = jnp.floataa ,__snake_case :bool = True ,**__snake_case :int ,) -> List[str]:
if input_shape is None:
a__ = (1, 2_24, 2_24, 3)
a__ = self.module_class(config=__snake_case ,dtype=__snake_case ,**__snake_case )
super().__init__(__snake_case ,__snake_case ,input_shape=__snake_case ,seed=__snake_case ,dtype=__snake_case ,_do_init=_do_init )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :jax.random.KeyArray ,__snake_case :Tuple ,__snake_case :FrozenDict = None ) -> FrozenDict:
# init input tensor
a__ = jax.random.normal(__snake_case ,__snake_case )
a__ , a__ = jax.random.split(__snake_case )
a__ = {'params': params_rng, 'dropout': dropout_rng}
a__ = self.module.init(__snake_case ,__snake_case )['params']
return random_params
def __call__( self :List[Any] ,__snake_case :int ,__snake_case :dict = None ,) -> List[Any]:
a__ = jnp.transpose(__snake_case ,(0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} ,jnp.array(__snake_case ,dtype=jnp.floataa ) ,rngs={} ,)
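# jax_cosine_distance above is a cosine-similarity matrix: rows of both inputs are
# L2-normalised and multiplied. A NumPy sketch of the same computation:
import numpy as np


def cosine_similarity_matrix(emb_a, emb_b, eps=1e-12):
    emb_a = emb_a / np.clip(np.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    emb_b = emb_b / np.clip(np.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return emb_a @ emb_b.T


x = np.array([[1.0, 0.0], [0.0, 2.0]])
assert np.allclose(cosine_similarity_matrix(x, x), np.eye(2))  # orthogonal rows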
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : float ):
return 1_0 - x * x
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float ):
# Bolzano's theorem: a root exists between a and b when f(a) and f(b) differ in sign
if equation(__lowerCAmelCase ) * equation(__lowerCAmelCase ) >= 0:
raise ValueError('Wrong space!' )
a__ = a
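    # Repeatedly halve [a, b] until the bracketing interval is narrower than the 0.01 tolerance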
while (b - a) >= 0.01:
# Find middle point
a__ = (a + b) / 2
# Check if middle point is root
if equation(__lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__lowerCAmelCase ) * equation(__lowerCAmelCase ) < 0:
a__ = c
else:
a__ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
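    # Illustrative check (not part of the original): the root of 10 - x^2 in [0, 6]
    # is sqrt(10), and bisection stops once the interval is below the 0.01 tolerance.
    assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01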
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
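    # Illustrative check (not part of the original): a 90-degree arc of a circle of
    # radius 10 is a quarter circumference, i.e. 5 * pi.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9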
| 657 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : int = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = SpeechTaTokenizer
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Tuple = True
def lowerCamelCase__( self :Any ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
a__ = SpeechTaTokenizer(__snake_case )
a__ = AddedToken('<mask>' ,lstrip=__snake_case ,rstrip=__snake_case )
a__ = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__( self :str ,__snake_case :List[str] ) -> List[Any]:
a__ = 'this is a test'
a__ = 'this is a test'
return input_text, output_text
def lowerCamelCase__( self :Any ,__snake_case :Tuple ,__snake_case :int=False ,__snake_case :Tuple=20 ,__snake_case :str=5 ) -> List[str]:
a__ , a__ = self.get_input_output_texts(__snake_case )
a__ = tokenizer.encode(__snake_case ,add_special_tokens=__snake_case )
a__ = tokenizer.decode(__snake_case ,clean_up_tokenization_spaces=__snake_case )
return text, ids
def lowerCamelCase__( self :Any ) -> Tuple:
a__ = '<pad>'
a__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) ,__snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) ,__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-4] ,'œ' )
self.assertEqual(vocab_keys[-2] ,'<mask>' )
self.assertEqual(vocab_keys[-1] ,'<ctc_blank>' )
self.assertEqual(len(__snake_case ) ,81 )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def lowerCamelCase__( self :Dict ) -> str:
a__ = self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a__ = tokenizer.vocab_size
a__ = len(__snake_case )
self.assertNotEqual(__snake_case ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
a__ = tokenizer.add_tokens(__snake_case )
a__ = tokenizer.vocab_size
a__ = len(__snake_case )
self.assertNotEqual(__snake_case ,0 )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,len(__snake_case ) )
self.assertEqual(__snake_case ,all_size + len(__snake_case ) )
a__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=__snake_case )
self.assertGreaterEqual(len(__snake_case ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
a__ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a__ = tokenizer.add_special_tokens(__snake_case )
a__ = tokenizer.vocab_size
a__ = len(__snake_case )
self.assertNotEqual(__snake_case ,0 )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,len(__snake_case ) )
self.assertEqual(__snake_case ,all_size_a + len(__snake_case ) )
a__ = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=__snake_case )
self.assertGreaterEqual(len(__snake_case ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def lowerCamelCase__( self :Any ) -> int:
pass
def lowerCamelCase__( self :Any ) -> str:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> int:
a__ = self.get_tokenizer()
a__ = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(__snake_case ,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
a__ = tokenizer.convert_tokens_to_ids(__snake_case )
# fmt: off
self.assertListEqual(__snake_case ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
a__ = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCamelCase__( self :List[str] ) -> List[Any]:
# Use custom sequence because this tokenizer does not handle numbers.
a__ = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a__ = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name='microsoft/speecht5_asr' ,revision='c5ef64c71905caeccde0e4462ef3f9077224c524' ,sequences=__snake_case ,)
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1, so only those candidates need testing
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
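    # The first loop counts the primes 2 and 3; afterwards only odd candidates are tested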
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from __future__ import annotations
import math
snake_case : int = '''2020.9.26'''
snake_case : List[Any] = '''xcodz-dot, cclaus, dhruvmanila'''
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
if not all(isinstance(__lowerCAmelCase , (float, int) ) for val in locals().values() ):
a__ = F'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(__lowerCAmelCase )
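    # Perspective projection: scale x and y by distance / (z + distance)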
a__ = ((x * distance) / (z + distance)) * scale
a__ = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : str , __lowerCAmelCase : float ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Axis must be a str' )
a__ = locals()
del input_variables["axis"]
if not all(isinstance(__lowerCAmelCase , (float, int) ) for val in input_variables.values() ):
a__ = (
'Input values except axis must either be float or int: '
F'{list(input_variables.values() )}'
)
raise TypeError(__lowerCAmelCase )
a__ = (angle % 3_6_0) / 4_5_0 * 1_8_0 / math.pi
if axis == "z":
a__ = x * math.cos(__lowerCAmelCase ) - y * math.sin(__lowerCAmelCase )
a__ = y * math.cos(__lowerCAmelCase ) + x * math.sin(__lowerCAmelCase )
a__ = z
elif axis == "x":
a__ = y * math.cos(__lowerCAmelCase ) - z * math.sin(__lowerCAmelCase )
a__ = z * math.cos(__lowerCAmelCase ) + y * math.sin(__lowerCAmelCase )
a__ = x
elif axis == "y":
a__ = x * math.cos(__lowerCAmelCase ) - z * math.sin(__lowerCAmelCase )
a__ = z * math.cos(__lowerCAmelCase ) + x * math.sin(__lowerCAmelCase )
a__ = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
    def lowerCamelCase__( self :str ) -> Optional[int]:
        # Example invalid inputs (illustrative): a zero max_weight must be rejected.
        self.assertRaisesRegex(
            ValueError ,'max_weight must be greater than zero.' ,kp.calc_profit ,[10] ,[2] ,0 )
    def lowerCamelCase__( self :Optional[Any] ) -> int:
        # Example invalid inputs (illustrative): a negative weight must be rejected.
        self.assertRaisesRegex(
            ValueError ,'Weight can not be negative.' ,kp.calc_profit ,[10] ,[-2] ,15 )
    def lowerCamelCase__( self :str ) -> List[str]:
        # Example invalid inputs (illustrative): a negative profit must be rejected.
        self.assertRaisesRegex(
            ValueError ,'Profit can not be negative.' ,kp.calc_profit ,[-10] ,[2] ,15 )
    def lowerCamelCase__( self :str ) -> Optional[Any]:
        # Example invalid inputs (illustrative): a negative max_weight must be rejected.
        self.assertRaisesRegex(
            ValueError ,'max_weight must be greater than zero.' ,kp.calc_profit ,[10] ,[2] ,-15 )
    def lowerCamelCase__( self :int ) -> List[Any]:
        # Example invalid inputs (illustrative): mismatched list lengths must be rejected.
        self.assertRaisesRegex(
            ValueError ,'The length of profit and weight must be the same.' ,kp.calc_profit ,[10, 20] ,[2] ,15 )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case : int = logging.get_logger(__name__)
snake_case : str = Dict[str, Any]
snake_case : List[Any] = List[Prediction]
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[int] ,*__snake_case :List[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
super().__init__(*__snake_case ,**__snake_case )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self ,'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCamelCase__( self :Any ,**__snake_case :List[str] ) -> Optional[int]:
a__ = {}
if "threshold" in kwargs:
a__ = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self :str ,*__snake_case :Optional[int] ,**__snake_case :List[str] ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :str ,__snake_case :int ) -> Tuple:
a__ = load_image(__snake_case )
a__ = torch.IntTensor([[image.height, image.width]] )
a__ = self.image_processor(images=[image] ,return_tensors='pt' )
if self.tokenizer is not None:
a__ = self.tokenizer(text=inputs['words'] ,boxes=inputs['boxes'] ,return_tensors='pt' )
a__ = target_size
return inputs
def lowerCamelCase__( self :List[str] ,__snake_case :List[Any] ) -> List[Any]:
a__ = model_inputs.pop('target_size' )
a__ = self.model(**__snake_case )
a__ = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
a__ = model_inputs['bbox']
return model_outputs
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :Tuple=0.9 ) -> Tuple:
a__ = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
a__ , a__ = target_size[0].tolist()
def unnormalize(__snake_case :Tuple ):
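                # Map the 0-1000 normalized LayoutLM box back to absolute pixel coordinates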
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
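            # Keep each token's best class index and its probability from the softmaxed logits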
a__ , a__ = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
a__ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
a__ = [unnormalize(__snake_case ) for bbox in model_outputs['bbox'].squeeze(0 )]
a__ = ['score', 'label', 'box']
a__ = [dict(zip(__snake_case ,__snake_case ) ) for vals in zip(scores.tolist() ,__snake_case ,__snake_case ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
a__ = self.image_processor.post_process_object_detection(__snake_case ,__snake_case ,__snake_case )
a__ = raw_annotations[0]
a__ = raw_annotation['scores']
a__ = raw_annotation['labels']
a__ = raw_annotation['boxes']
a__ = scores.tolist()
a__ = [self.model.config.idalabel[label.item()] for label in labels]
a__ = [self._get_bounding_box(__snake_case ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
a__ = ['score', 'label', 'box']
a__ = [
dict(zip(__snake_case ,__snake_case ) )
for vals in zip(raw_annotation['scores'] ,raw_annotation['labels'] ,raw_annotation['boxes'] )
]
return annotation
def lowerCamelCase__( self :int ,__snake_case :"torch.Tensor" ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
a__ , a__ , a__ , a__ = box.int().tolist()
a__ = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
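            # Halfway through, round-trip the scheduler state through a checkpoint on disk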
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
        w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
        w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
snake_case : Union[str, Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
snake_case : int = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
snake_case : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
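    # Find the first element smaller than the pivot; a subsequence starting there
    # may be longer than one that keeps the pivot.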
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
import os
import numpy
import onnx
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
a__ = a.name
a__ = b.name
a__ = ''
a__ = ''
a__ = a == b
a__ = name_a
a__ = name_b
return res
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__lowerCAmelCase , __lowerCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __lowerCAmelCase , __lowerCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __lowerCAmelCase , __lowerCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
for n in graph_proto.node:
_node_replace_input_with(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
a__ = list(model.graph.initializer )
a__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a__ = inits[i].name
a__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
a__ = os.path.dirname(__lowerCAmelCase )
a__ = os.path.basename(__lowerCAmelCase )
a__ = onnx.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
a__ = list(model.graph.initializer )
a__ = set()
a__ = {}
a__ = []
a__ = 0
for i in range(len(__lowerCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__lowerCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__lowerCAmelCase )
dup_set.add(__lowerCAmelCase )
a__ = inits[j].data_type
a__ = numpy.prod(inits[j].dims )
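                # Bytes per element by ONNX dtype: 1 (FLOAT) and 6 (INT32) use 4; 7 (INT64) and 11 (DOUBLE) use 8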
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , __lowerCAmelCase )
total_reduced_size += mem_size
a__ = inits[i].name
a__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__lowerCAmelCase )
else:
a__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
a__ = sorted(__lowerCAmelCase )
_remove_dup_initializers_from_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = 'optimized_' + model_file_name
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
onnx.save(__lowerCAmelCase , __lowerCAmelCase )
return new_model
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case : List[str] = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[str] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
snake_case : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
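        # Pad the previous row with zeros so that summing adjacent pairs yields the next row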
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case : Dict = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
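    # (play >= oppo) + (play > oppo) evaluates to 0, 1, or 2, indexing Loss, Tie, or Win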
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
snake_case : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
snake_case : int = [{"type": "code", "content": INSTALL_CONTENT}]
snake_case : Dict = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 700 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
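    # The n-th hexagonal number is n * (2n - 1)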
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
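    # Illustrative check (not part of the original): the first five values of
    # n * (2n - 1) for n = 0..4.
    assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]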
| 657 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case_ (_snake_case , _snake_case ):
@register_to_config
def __init__( self :Any ,__snake_case :int = 7_68 ,) -> Union[str, Any]:
super().__init__()
a__ = nn.Parameter(torch.zeros(1 ,lowerCAmelCase__ ) )
a__ = nn.Parameter(torch.ones(1 ,lowerCAmelCase__ ) )
def lowerCamelCase__( self :str ,__snake_case :Optional[Union[str, torch.device]] = None ,__snake_case :Optional[torch.dtype] = None ,) -> List[Any]:
a__ = nn.Parameter(self.mean.to(lowerCAmelCase__ ).to(lowerCAmelCase__ ) )
a__ = nn.Parameter(self.std.to(lowerCAmelCase__ ).to(lowerCAmelCase__ ) )
return self
def lowerCamelCase__( self :int ,__snake_case :Any ) -> Optional[int]:
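        # Standardize the embeddings with the learned mean and std; inverted by the method below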
a__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> List[Any]:
a__ = (embeds * self.std) + self.mean
return embeds
| 701 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
    if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
        raise ValueError('The length of profit and weight must be the same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
    # Compute the profit per unit of weight (profit/weight) for each item.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
        # Select the largest remaining profit/weight ratio and find its item index
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # The whole item fits, so add its full profit
            # (fraction taken = weight[index] / weight[index] = 1)
gain += 1 * profit[index]
else:
            # The item does not fit entirely, so take only the remaining capacity
            # and add the proportional profit:
            # (max_weight - limit) / weight[index] * profit[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 0 |
def __lowercase ( __lowerCAmelCase : Any ):
a__ = int(_SCREAMING_SNAKE_CASE )
if n_element < 1:
        a__ = ValueError('n_element should be a positive number' )
raise my_error
a__ = [1]
a__ , a__ , a__ = (0, 0, 0)
a__ = 1
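    # Advance each pointer past multiples already covered, then append the smallest
    # next candidate of the form 2^i * 3^j * 5^k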
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
snake_case : str = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
snake_case : str = hamming(int(n))
print('''-----------------------------------------------------''')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('''-----------------------------------------------------''')
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
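# Usage sketch (illustrative, not part of the original file): with the lazy
# module in place, importing the package stays cheap, e.g.
#
#   from transformers.models.focalnet import FocalNetConfig
#   config = FocalNetConfig()
#
# works without pulling in torch; `modeling_focalnet` is only imported the
# first time one of the model classes listed above is actually accessed.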
| 657 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
snake_case : str = logging.getLogger(__name__)
snake_case : List[str] = "pytorch_model.bin"
@dataclasses.dataclass
class snake_case_ :
UpperCAmelCase__ : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class snake_case_ :
UpperCAmelCase__ : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCAmelCase__ : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCAmelCase__ : Optional[List[str]] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class snake_case_ :
UpperCAmelCase__ : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default='''no''' , metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: [\"no\", \"steps\", \"epoch\"]'''
} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=1_0 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
    UpperCAmelCase__ : Optional[int] = dataclasses.field(
        default=1_0_0 , metadata={'''help''': '''Maximum number of self-training iterations to run.'''} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Random seed for initialization.'''} , )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
a__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
a__ = dataset.filter(lambda __lowerCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
a__ = int(eval_result * len(__lowerCAmelCase ) )
print(__lowerCAmelCase )
a__ = dataset.sort('probability' , reverse=__lowerCAmelCase )
a__ = dataset.select(range(__lowerCAmelCase ) )
a__ = dataset.remove_columns(['label', 'probability'] )
a__ = dataset.rename_column('prediction' , 'label' )
a__ = dataset.map(lambda __lowerCAmelCase : {"label": idalabel[example["label"]]} )
a__ = dataset.shuffle(seed=args.seed )
a__ = os.path.join(__lowerCAmelCase , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(__lowerCAmelCase , index=__lowerCAmelCase )
else:
dataset.to_json(__lowerCAmelCase )
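# A minimal, self-contained sketch of the two filtering modes used above
# (illustrative only; the real code operates on a `datasets.Dataset`):
def _filter_pseudo_labels_sketch(rows , confidence_threshold=None , keep_fraction=None ):
    # rows: list of dicts with 'prediction' and 'probability' keys
    if confidence_threshold is not None:
        # keep only predictions the model is sufficiently confident about
        rows = [r for r in rows if r['probability'] > confidence_threshold]
    if keep_fraction is not None:
        # keep the top fraction by confidence, mirroring eval_result * len(dataset)
        rows = sorted(rows , key=lambda r: r['probability'] , reverse=True )
        rows = rows[: int(keep_fraction * len(rows ) )]
    return rows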
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ):
a__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
a__ = STModelArguments(model_name_or_path=__lowerCAmelCase )
a__ = STDataArguments(train_file=__lowerCAmelCase , infer_file=__lowerCAmelCase )
a__ = STTrainingArguments(output_dir=__lowerCAmelCase )
a__ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowerCAmelCase ).items():
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for key, value in kwargs.items():
if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Sanity checks
a__ = {}
a__ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
a__ = args.train_file
a__ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
a__ = args.eval_file
for key in data_files:
a__ = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
a__ = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
a__ = F'{args.output_dir}/self-train_iter-{{}}'.format
a__ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
accelerator.wait_for_everyone()
a__ = None
a__ = None
a__ = 0
a__ = False
# Show the progress bar
a__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
a__ = data_dir_format(__lowerCAmelCase )
assert os.path.exists(__lowerCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
a__ = os.path.join(__lowerCAmelCase , 'stage-1' )
a__ = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowerCAmelCase , __lowerCAmelCase ):
arguments_dict.update({key: value} )
a__ = os.path.join(__lowerCAmelCase , 'best-checkpoint' , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , __lowerCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
a__ = os.path.join(__lowerCAmelCase , 'best-checkpoint' )
a__ = os.path.join(__lowerCAmelCase , 'stage-2' )
# Update arguments_dict
a__ = model_path
a__ = data_files['train']
a__ = current_output_dir
a__ = os.path.join(__lowerCAmelCase , 'best-checkpoint' , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , __lowerCAmelCase )
a__ = iteration
a__ = data_dir_format(iteration + 1 )
a__ = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , 'best-checkpoint' ) )
a__ = config.idalabel
a__ = os.path.join(__lowerCAmelCase , 'eval_results_best-checkpoint.json' )
a__ = os.path.join(__lowerCAmelCase , 'test_results_best-checkpoint.json' )
assert os.path.exists(__lowerCAmelCase )
with open(__lowerCAmelCase , 'r' ) as f:
a__ = float(json.load(__lowerCAmelCase )[args.eval_metric] )
a__ = os.path.join(__lowerCAmelCase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(__lowerCAmelCase )
# Loading the dataset from local csv or json files.
a__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
a__ = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(__lowerCAmelCase ):
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.wait_for_everyone()
a__ = os.path.join(__lowerCAmelCase , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
a__ = eval_result
if best_iteration is None:
a__ = new_iteration
a__ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
a__ = new_iteration
a__ = new_eval_result
a__ = 0
else:
if new_eval_result == best_eval_result:
a__ = new_iteration
a__ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
a__ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , __lowerCAmelCase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , F'eval_results_iter-{iteration}.json' ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
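# Simplified sketch of the early-stopping rule applied in the loop above
# (illustrative only): the patience counter resets when the metric improves by
# more than `early_stopping_threshold`, and training stops once the counter
# reaches `early_stopping_patience`.
def _should_stop_sketch(new_result , best_result , threshold , counter , patience ):
    if best_result is None or new_result - best_result > threshold:
        return 0, False  # improvement: reset the patience counter
    counter += 1  # no (sufficient) improvement this iteration
    return counter, counter >= patience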
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        (
            a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ,
        ) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
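# Sketch of what the integration test above relies on (illustrative):
#
#   model = CTRLLMHeadModel.from_pretrained('ctrl' ).to(torch_device )
#   prompt = torch.tensor([[1_18_59, 0, 16_11, 8]] , device=torch_device )
#   output = model.generate(prompt , do_sample=False )
#
# With sampling disabled, generate() performs greedy decoding and is therefore
# deterministic, which is what allows the test to pin the continuation of
# "Legal the president is" to an exact sequence of token ids.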
| 657 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case_ :
def __init__( self :Optional[int] ,__snake_case :str ,__snake_case :Tuple=13 ,__snake_case :int=7 ,__snake_case :Optional[int]=True ,__snake_case :List[Any]=True ,__snake_case :int=True ,__snake_case :int=True ,__snake_case :int=99 ,__snake_case :Optional[Any]=32 ,__snake_case :int=2 ,__snake_case :List[Any]=4 ,__snake_case :int=37 ,__snake_case :Union[str, Any]="gelu" ,__snake_case :int=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=5_12 ,__snake_case :List[Any]=16 ,__snake_case :Union[str, Any]=2 ,__snake_case :Dict=0.02 ,__snake_case :List[Any]=3 ,__snake_case :Tuple=4 ,__snake_case :int=None ,__snake_case :Tuple=10_00 ,) -> str:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = range_bbox
def lowerCamelCase__( self :str ) -> Dict:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
a__ = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ = bbox[i, j, 3]
a__ = bbox[i, j, 1]
a__ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ = bbox[i, j, 2]
a__ = bbox[i, j, 0]
a__ = t
a__ = tf.convert_to_tensor(UpperCamelCase_ )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = LayoutLMConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__( self :List[str] ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Optional[int] ,__snake_case :List[str] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[Any] ) -> Any:
a__ = TFLayoutLMModel(config=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def lowerCamelCase__( self :List[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :List[str] ,__snake_case :Dict ,__snake_case :List[str] ,__snake_case :List[str] ) -> Optional[Any]:
a__ = TFLayoutLMForMaskedLM(config=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Dict ,__snake_case :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :Union[str, Any] ) -> Dict:
a__ = self.num_labels
a__ = TFLayoutLMForSequenceClassification(config=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :Any ,__snake_case :str ,__snake_case :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Dict ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :int ,__snake_case :List[Any] ) -> Optional[int]:
a__ = self.num_labels
a__ = TFLayoutLMForTokenClassification(config=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :Tuple ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :str ) -> Union[str, Any]:
a__ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase_ )
a__ = model(UpperCamelCase_ ,UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__( self :List[str] ) -> int:
a__ = self.prepare_config_and_inputs()
        a__ = config_and_inputs
a__ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : str = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Union[str, Any] = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Dict = 1_0
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = TFLayoutLMModelTester(self )
a__ = ConfigTester(self ,config_class=UpperCamelCase_ ,hidden_size=37 )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :Any ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
def lowerCamelCase__( self :int ) -> int:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
@slow
def lowerCamelCase__( self :str ) -> Dict:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TFLayoutLMModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
pass
def __lowercase ( ):
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
a__ = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231
a__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
a__ = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231
a__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
a__ = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :Any ) -> Any:
a__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
a__ = prepare_layoutlm_batch_inputs()
# forward pass
a__ = model(input_ids=UpperCamelCase_ ,bbox=UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
# test the sequence output on [0, :3, :3]
a__ = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] ,)
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,UpperCamelCase_ ,atol=1E-3 ) )
# test the pooled output on [1, :3]
a__ = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,UpperCamelCase_ ,atol=1E-3 ) )
@slow
def lowerCamelCase__( self :List[Any] ) -> int:
a__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' ,num_labels=2 )
a__ = prepare_layoutlm_batch_inputs()
# forward pass
a__ = model(
input_ids=UpperCamelCase_ ,bbox=UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=tf.convert_to_tensor([1, 1] ) ,)
# test whether we get a loss as a scalar
a__ = outputs.loss
a__ = (2,)
self.assertEqual(loss.shape ,UpperCamelCase_ )
# test the shape of the logits
a__ = outputs.logits
a__ = (2, 2)
self.assertEqual(logits.shape ,UpperCamelCase_ )
@slow
def lowerCamelCase__( self :Any ) -> Any:
a__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' ,num_labels=13 )
a__ = prepare_layoutlm_batch_inputs()
# forward pass
a__ = model(
input_ids=UpperCamelCase_ ,bbox=UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=UpperCamelCase_ )
# test the shape of the logits
a__ = outputs.logits
a__ = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape ,UpperCamelCase_ )
@slow
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
a__ = prepare_layoutlm_batch_inputs()
# forward pass
a__ = model(input_ids=UpperCamelCase_ ,bbox=UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ )
# test the shape of the logits
a__ = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape ,UpperCamelCase_ )
self.assertEqual(outputs.end_logits.shape ,UpperCamelCase_ )
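# Note on the hard-coded tensors in prepare_layoutlm_batch_inputs (illustrative):
# LayoutLM expects every bounding box as (x0, y0, x1, y1) normalized to a
# 0-1000 grid (hence range_bbox=1000 in the tester above), and the legality
# fix-up in prepare_config_and_inputs swaps coordinates so that x1 >= x0 and
# y1 >= y0.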
| 704 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
        # For now we only support F-PNDM, i.e. the Runge-Kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formulas (9), (12), (13) and Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
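# The ets-history blend in step() above is a linear multistep (Adams-Bashforth)
# update; as a self-contained sketch in plain Python (illustrative only):
def _adams_bashforth_combination(ets: list ):
    # ets holds the most recent model outputs, oldest first
    if len(ets ) == 1:
        return ets[-1]
    if len(ets ) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets ) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24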
| 657 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Dict=None , ):
if attention_mask is None:
a__ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
a__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
a__ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a__ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class snake_case_ :
def __init__( self :List[Any] ,__snake_case :List[Any] ,__snake_case :Union[str, Any]=13 ,__snake_case :Optional[int]=7 ,__snake_case :str=True ,__snake_case :List[Any]=False ,__snake_case :Dict=99 ,__snake_case :List[Any]=16 ,__snake_case :str=2 ,__snake_case :Union[str, Any]=4 ,__snake_case :str=4 ,__snake_case :int="gelu" ,__snake_case :List[Any]=0.1 ,__snake_case :Optional[Any]=0.1 ,__snake_case :Union[str, Any]=32 ,__snake_case :Optional[Any]=2 ,__snake_case :Any=1 ,__snake_case :List[Any]=0 ,__snake_case :Any=0.02 ,) -> Union[str, Any]:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = eos_token_id
a__ = pad_token_id
a__ = bos_token_id
a__ = initializer_range
def lowerCamelCase__( self :List[str] ) -> Tuple:
a__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) ,3 ,self.vocab_size )
a__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) ,dtype=np.intaa )) ,-1 )
a__ = shift_tokens_right(__lowerCamelCase ,1 ,2 )
a__ = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,initializer_range=self.initializer_range ,use_cache=__lowerCamelCase ,)
a__ = prepare_blenderbot_inputs_dict(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
return config, inputs_dict
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__( self :Tuple ,__snake_case :Tuple ,__snake_case :Tuple ,__snake_case :Tuple ) -> Union[str, Any]:
a__ = 20
a__ = model_class_name(__lowerCamelCase )
a__ = model.encode(inputs_dict['input_ids'] )
a__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a__ = model.init_cache(decoder_input_ids.shape[0] ,__lowerCamelCase ,__lowerCamelCase )
a__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype='i4' )
a__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
a__ = model.decode(
decoder_input_ids[:, :-1] ,__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase ,past_key_values=__lowerCamelCase ,decoder_position_ids=__lowerCamelCase ,)
a__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype='i4' )
a__ = model.decode(
decoder_input_ids[:, -1:] ,__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=__lowerCamelCase ,)
a__ = model.decode(__lowerCamelCase ,__lowerCamelCase )
a__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=F'Max diff is {diff}' )
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[int] ,__snake_case :Any ,__snake_case :Optional[Any] ) -> Tuple:
a__ = 20
a__ = model_class_name(__lowerCamelCase )
a__ = model.encode(inputs_dict['input_ids'] )
a__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
a__ = model.init_cache(decoder_input_ids.shape[0] ,__lowerCamelCase ,__lowerCamelCase )
a__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
a__ = model.decode(
decoder_input_ids[:, :-1] ,__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase ,past_key_values=__lowerCamelCase ,decoder_position_ids=__lowerCamelCase ,)
a__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype='i4' )
a__ = model.decode(
decoder_input_ids[:, -1:] ,__lowerCamelCase ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=__lowerCamelCase ,decoder_position_ids=__lowerCamelCase ,)
a__ = model.decode(__lowerCamelCase ,__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase )
a__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=F'Max diff is {diff}' )
@require_flax
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Tuple = 9_9
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] ,dtype=np.intaa ,)
a__ = input_ids.shape[0]
a__ = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=24 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=32 ,decoder_ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ = self._get_config_and_data()
a__ = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
a__ = lm_model(input_ids=__lowerCamelCase )
a__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape ,__lowerCamelCase )
def lowerCamelCase__( self :Tuple ) -> List[str]:
a__ = BlenderbotConfig(
vocab_size=self.vocab_size ,d_model=14 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=8 ,decoder_ffn_dim=8 ,max_position_embeddings=48 ,)
a__ = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
a__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] ,dtype=np.intaa )
a__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] ,dtype=np.intaa )
a__ = lm_model(input_ids=__lowerCamelCase ,decoder_input_ids=__lowerCamelCase )
a__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape ,__lowerCamelCase )
def lowerCamelCase__( self :Optional[int] ) -> Any:
a__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] ,dtype=np.intaa )
a__ = shift_tokens_right(__lowerCamelCase ,1 ,2 )
a__ = np.equal(__lowerCamelCase ,1 ).astype(np.floataa ).sum()
a__ = np.equal(__lowerCamelCase ,1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape ,input_ids.shape )
self.assertEqual(__lowerCamelCase ,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] ,2 ).all() )
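# Self-contained sketch of what shift_tokens_right does (illustrative; mirrors
# the Flax implementation imported above; expects a 2D numpy array):
def _shift_tokens_right_sketch(input_ids , pad_token_id , decoder_start_token_id ):
    shifted = np.zeros_like(input_ids )
    shifted[:, 1:] = input_ids[:, :-1]  # shift every token one step to the right
    shifted[:, 0] = decoder_start_token_id  # prepend the decoder start token
    # masked (-100) positions, if any, fall back to the pad token
    return np.where(shifted == -100 , pad_token_id , shifted )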
@require_flax
class snake_case_ (lowercase__ , unittest.TestCase , lowercase__ ):
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Optional[Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__( self :Optional[Any] ) -> Any:
a__ = FlaxBlenderbotModelTester(self )
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
def lowerCamelCase__( self :List[Any] ) -> Tuple:
a__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a__ = self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase )
a__ = model_class(__lowerCamelCase )
@jax.jit
def encode_jitted(__snake_case :List[str] ,__snake_case :Tuple=None ,**__snake_case :int ):
return model.encode(input_ids=__lowerCamelCase ,attention_mask=__lowerCamelCase )
with self.subTest('JIT Enabled' ):
a__ = encode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
a__ = encode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) ,len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase ,__lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a__ = model_class(__lowerCamelCase )
a__ = model.encode(inputs_dict['input_ids'] ,inputs_dict['attention_mask'] )
a__ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__snake_case :Dict ,__snake_case :Any ,__snake_case :str ):
return model.decode(
decoder_input_ids=__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase ,encoder_outputs=__lowerCamelCase ,)
with self.subTest('JIT Enabled' ):
a__ = decode_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
a__ = decode_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) ,len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase ,__lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def lowerCamelCase__( self :Any ) -> Optional[int]:
for model_class_name in self.all_model_classes:
a__ = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
a__ = np.ones((1, 1) ) * model.config.eos_token_id
a__ = model(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skipUnless(jax_device != 'cpu' ,'3B test too slow on CPU.' )
@slow
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
a__ = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
a__ = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' ,from_pt=__lowerCamelCase )
a__ = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
a__ = ["Sam"]
a__ = tokenizer(__lowerCamelCase ,return_tensors='jax' )
a__ = model.generate(**__lowerCamelCase ,**__lowerCamelCase )
a__ = "Sam is a great name. It means \"sun\" in Gaelic."
a__ = tokenizer.batch_decode(__lowerCamelCase ,**__lowerCamelCase )
assert generated_txt[0].strip() == tgt_text
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
snake_case : Tuple = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
snake_case : Optional[int] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('''\n'''.join(upper_files) + '''\n''')
snake_case : int = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('''\n'''.join(space_files) + '''\n''')
snake_case : int = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('''\n'''.join(hyphen_files) + '''\n''')
snake_case : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('''\n'''.join(nodir_files) + '''\n''')
snake_case : List[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
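# Note (illustrative): calling sys.exit() with a nonzero integer makes the
# process exit with that status code, so this script fails a CI job whenever
# any offending file names are found.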
| 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'=> File names {file_names}')

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
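A hedged usage sketch of the function above; 'BertTokenizer' and 'bert-base-uncased' are illustrative values, assuming 'BertTokenizer' is among the SLOW_TO_FAST_CONVERTERS keys:

# Equivalent to invoking the script with:
#   --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path /tmp/fast-tokenizers
convert_slow_checkpoint_to_fast(
    tokenizer_name='BertTokenizer',
    checkpoint_name='bert-base-uncased',
    dump_path='/tmp/fast-tokenizers',
    force_download=False,
)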
| 657 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    'split_dict',
    [
        SplitDict(),
        SplitDict({'train': SplitInfo(name='train', num_bytes=1_3_3_7, num_examples=4_2, dataset_name='my_dataset')}),
        SplitDict({'train': SplitInfo(name='train', num_bytes=1_3_3_7, num_examples=4_2)}),
        SplitDict({'train': SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # dataset_name field is marked as deprecated, so it must survive the asdict round-trip
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 707 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def fetch_jobs(location: str = 'mumbai') -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 708 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average='binary', sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 657 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'],
                attention_mask=inputs['attention_mask'],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])

        labels = inputs.pop('labels')
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                F' padded to `max_length`={max_length}' )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
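To make the padding helper concrete, here is a standalone sketch of what `_pad_tensors_to_max_len` computes (the tensor values are illustrative):

import torch

batch = torch.tensor([[5, 6, 7], [8, 9, 10]])  # shape (2, 3)
pad_token_id, max_length = 0, 5
# allocate a (batch, max_length) tensor filled with the pad id, then copy the data in
padded = pad_token_id * torch.ones((batch.shape[0], max_length), dtype=batch.dtype)
padded[:, : batch.shape[-1]] = batch
# padded is tensor([[5, 6, 7, 0, 0], [8, 9, 10, 0, 0]]) -- right-padded to max_length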
| 657 | 0 |
def solution(n: int = 1_0_0) -> int:
    # (n * (n + 1) / 2) ** 2 is the square of the sum (and also the sum of the first n cubes)
    sum_cubes = (n * (n + 1) // 2) ** 2
    # n * (n + 1) * (2n + 1) / 6 is the sum of the squares
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''

DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode='run'):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name}
    )
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return f.read()
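Note the dispatch: any string containing whitespace is treated as a literal prompt and returned unchanged, while a space-free string is treated as a Hub repo ID and fetched. A small illustration (the agent name is arbitrary):

# returned as-is: the string contains whitespace, so it is treated as the prompt itself
assert download_prompt('Answer the question: <<task>>', agent_name='my-agent') == 'Answer the question: <<task>>'

# by contrast, a space-free string like 'huggingface-tools/default-prompts' would be
# resolved as a dataset repo ID and downloaded via cached_file (requires network access).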
| 657 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                'predictions': datasets.Sequence(datasets.Value('float')),
                'references': datasets.Sequence(datasets.Value('float')),
            }
        else:
            return {
                'predictions': datasets.Value('float'),
                'references': datasets.Value('float'),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput='uniform_average', squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4)
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 0 |