import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"],
            padding=True,
            truncation=True,
            return_tensors=FRAMEWORK,
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_decode(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
def upper(word: str) -> str:
    """
    Convert an entire string to ASCII uppercase by subtracting 32 from the
    code point of every lowercase ASCII letter.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello")
    'HELLO'
    >>> upper("wh[]32")
    'WH[]32'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
def solution(length: int = 50) -> int:
    """
    Project Euler problem 116: count the ways a row of ``length`` grey tiles
    can have tiles replaced by coloured oblong tiles (red of length two, green
    of length three, blue of length four), using at least one coloured tile
    and a single colour per row.

    >>> solution(5)
    12
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "encoder.layers.0.attention.k_proj") down to the target module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
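A hypothetical invocation of the conversion script above (the script file name, checkpoint path, and output directory are placeholders, not values from the original):

# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted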
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursive solution to the rod-cutting problem: the maximum
    revenue obtainable from a rod of length ``n`` given a list of prices."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate that ``n`` is non-negative and covered by the price list."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
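As a quick hand-worked example of the three implementations above (the prices are illustrative, not from the original file): with prices [1, 5, 8, 9], a rod of length 4 is best cut into two pieces of length 2, for a revenue of 5 + 5 = 10.

>>> bottom_up_cut_rod(4, [1, 5, 8, 9])
10
>>> naive_cut_rod_recursive(4, [1, 5, 8, 9])
10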
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return the Liouville lambda of ``number``: 1 if the number of prime
    factors (counted with multiplicity) is even, -1 if it is odd.

    >>> liouville_lambda(10)
    1
    >>> liouville_lambda(11)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
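Assuming the function above, one more check follows directly from the definition: 12 = 2 * 2 * 3 has three prime factors counted with multiplicity, so its Liouville lambda is -1.

>>> liouville_lambda(12)
-1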
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
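A small usage sketch (the tree shape and values are illustrative): the iterator yields the sum of every node, here 10 + 5 + (-3) = 12.

>>> root = Node(10)
>>> root.left = Node(5)
>>> root.right = Node(-3)
>>> next(iter(BinaryTreeNodeSum(root)))
12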
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch against the pattern
        placed at ``current_pos``, or -1 if the whole pattern matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns the index positions of matches
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
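For the demo inputs above, the pattern "AB" occurs at offsets 0 and 3 of "ABAABA", so running the module prints:

Pattern found in following positions: 
[0, 3]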
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy 0.5 * mass * velocity**2. Kinetic energy does
    not depend on the direction of travel, hence abs(velocity).

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(20, -20)
    4000.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
"""
A quine: a program that takes no input and produces a copy of its own source
code as its only output (disregarding this docstring).
"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
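To see why the %r round trip reproduces the source, note that formatting the template with its own repr rebuilds the full print statement (a minimal illustration, not part of the original file):

>>> template = "print((lambda quine: quine %% quine)(%r))"
>>> print(template % template)
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))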
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta, the velocity as a fraction of the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz transformation matrix for a boost along x."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Transform a four-vector (ct, x, y, z); a symbolic vector is used if none is given."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
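A quick numeric sanity check of the helpers above (the value follows from the definition of the Lorentz factor, not from the original script): at half the speed of light, gamma = 1 / sqrt(1 - 0.25).

>>> round(gamma(0.5 * c), 4)
1.1547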
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
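As a minimal illustration of the scoring helpers above (the records are toy data, not from a real SQuAD file): exact match and F1 are computed after normalization, so casing does not matter.

>>> toy_dataset = [{"paragraphs": [{"qas": [{"id": "q1", "answers": {"text": ["Paris"]}}]}]}]
>>> toy_preds = {"q1": "paris"}
>>> get_raw_scores(toy_dataset, toy_preds)
({'q1': 1}, {'q1': 1.0})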
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top ``max_stories`` posts from HackerNews."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Find the next greatest element of each entry by scanning the rest of
    the list: O(n^2) time, O(1) extra space."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterating with enumerate and slices; still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based solution: O(n) time, O(n) space."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
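A short sketch of the fairseq alignment described in the comments above (it downloads the real checkpoint, so treat it as illustrative rather than a unit test):

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# The first four ids follow the fairseq layout rather than the raw spm layout:
print(tokenizer.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"]))  # [0, 1, 2, 3]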
| 359 |
from ..utils import DummyObject, requires_backends


# Placeholder classes raised when the "speech" backend is missing; the
# concrete names mirror transformers.utils.dummy_speech_objects.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
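# Example invocation (illustrative; the model and dataset IDs are placeholders,
# any CTC/seq2seq ASR checkpoint loadable by the pipeline works):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs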
| 14 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask pad tokens in the labels so that they do not contribute to the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
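# Design note: the -100 value used when masking padded label positions above is
# PyTorch's default `ignore_index` for `nn.CrossEntropyLoss`, so those positions
# contribute nothing to the training loss or its gradient.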
| 87 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.
    (The original function name is not recoverable; a descriptive one is used.)
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previously furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
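# Example usage (illustrative):
#   palindromic_string("abbbaba")           -> "abbba"
#   palindromic_string("forgeeksskeegfor")  -> "geeksskeeg"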
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
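# Minimal usage sketch (hedged; relies only on the class defined above):
#   config = BitConfig(out_features=["stage1", "stage4"])
#   config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]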
| 79 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
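# Usage note (assumption based on the code above, not a documented contract):
# the script expects a `GITHUB_TOKEN` environment variable with repo access and
# is intended to be run on a schedule, e.g. a daily CI job, so that the 23-day
# and 7-day inactivity windows are evaluated regularly.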
| 83 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
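# Usage sketch (hedged; mirrors how datasets' task templates are typically used):
#   template = LanguageModeling(text_column="content")
#   template.column_mapping  # {"content": "text"}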
| 230 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
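# Note: the integration test above downloads the `albert-base-v2` checkpoint
# from the Hugging Face Hub on first run, which is why it is gated behind the
# @slow decorator and only exercised in scheduled CI.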
| 230 | 1 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """
    Solve for the third quantity among shear stress, tangential force and area,
    given the other two (pass 0 for the unknown one).
    """
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
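# Example (illustrative): a 100 N tangential force over a 20 m^2 area with
# unknown stress:
#   shear_stress(stress=0, tangential_force=100, area=20)  -> ("stress", 5.0)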
| 78 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 0 |
"""simple docstring"""
from manim import *
# Reconstruction note: the original variable names and manim direction constants
# were lost during extraction; UP/DOWN/LEFT/RIGHT below are best-effort
# assumptions based on the intended layout of this animation.
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
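# To render this scene with the standard manim CLI (low-quality preview mode):
#   manim -pql this_file.py Stage1
# where `this_file.py` stands in for whatever the module is actually named.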
| 367 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, looking for the files identified with `identifier`,
        and executes the doctests in those files.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 166 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of a decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used', torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
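# Design note: `kl()` above is the closed-form KL divergence between diagonal
# Gaussians; against the standard normal it reduces to
#   KL = 0.5 * sum(mean^2 + var - 1 - logvar)
# which is exactly the expression implemented when `other is None`. `sample()`
# is the reparameterization trick: mean + std * eps with eps ~ N(0, I).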
| 50 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 118 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

            self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

            self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

            self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 37 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNet2DModel
    main_input_name = 'sample'
    @property
    def dummy_input(self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([10] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        """simple docstring"""
        return (4, 32, 32)
    @property
    def output_shape(self ):
        """simple docstring"""
        return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        """simple docstring"""
        init_dict = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        """simple docstring"""
        model, loading_info = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def test_from_pretrained_accelerate(self ):
        """simple docstring"""
        model, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def test_from_pretrained_accelerate_wont_change_results(self ):
        """simple docstring"""
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            'fusing/unet-ldm-dummy-update' , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )['sample']
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
    def test_output_pretrained(self ):
        """simple docstring"""
        model = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update' )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNet2DModel
    main_input_name = 'sample'
    @property
    def dummy_input(self , sizes=(32, 32) ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self ):
        """simple docstring"""
        return (3, 32, 32)
    @property
    def output_shape(self ):
        """simple docstring"""
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self ):
        """simple docstring"""
        init_dict = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1e-6,
            'mid_block_scale_factor': math.sqrt(2.0 ),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self ):
        """simple docstring"""
        model, loading_info = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
        inputs['sample'] = noise
        image = model(**inputs )
        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_large(self ):
        """simple docstring"""
        model = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256' )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
    def test_output_pretrained_ve_mid(self ):
        """simple docstring"""
        model = UNet2DModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
    def test_forward_with_norm_groups(self ):
        """simple docstring"""
        # not required for this model
        pass
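# --- Editor's illustrative sketch (added; not part of the original test file) ---
# A hedged, stand-alone version of what the tests above exercise: one
# denoising forward pass through a freshly constructed UNet2DModel.
def _example_unet2d_forward():
    import torch
    from diffusers import UNet2DModel
    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    noise = torch.randn(4, 3, 32, 32)
    timestep = torch.tensor([10])
    with torch.no_grad():
        sample = model(noise, timestep).sample  # same shape as the input noise
    assert sample.shape == noise.shape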
| 37 | 1 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
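# Illustrative note (added): the function returns a mapping like
# {"The Shawshank Redemption": 9.2, "The Godfather": 9.2, ...}; exact titles
# and ratings depend on the live IMDb chart at call time.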
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 87 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
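# --- Editor's illustrative note (added) ---
# The pattern above in miniature: `_import_structure` maps submodule names to
# exported symbols, and `_LazyModule` replaces this module in `sys.modules`
# so heavy framework imports only happen on first attribute access, e.g.:
#
#   from transformers.models.electra import ElectraModel
#   # the torch-backed submodule is only imported when `ElectraModel`
#   # is actually resolved on the lazy module object.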
| 87 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(f'Loading text from {args.file_path}' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(f'{len(data )} examples to process.' )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(f'{len(data )} examples processed.' )
    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'Dump to {dp_file}' )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
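# --- Editor's illustrative sketch (added; not part of the original script) ---
# Reading the dump back; the file name below is a hypothetical example of
# what `{dump_file}.{tokenizer_name}.pickle` expands to. Token ids are stored
# as np.uint16 when the vocabulary fits in 16 bits, np.int32 otherwise.
#
# import pickle
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)  # list of token-id arrays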
| 311 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self , **kwargs ):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**kwargs )
        return config
    def check_over_configs(self , time_step=0 , **config ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self , time_step=0 , **forward_kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self , scheduler=None , **config ):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape(self ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch(self ):
        """simple docstring"""
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
    def test_timesteps(self ):
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding(self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def test_prediction_type(self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type(self ):
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final(self ):
        """simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps(self ):
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise(self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1e-3
    def test_full_loop_with_v_prediction(self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1014 ) < 1e-3
    def test_fp16_support(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps(self , **config ):
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
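# --- Editor's illustrative sketch (added; not part of the original tests) ---
# A hedged, minimal denoising loop with UniPCMultistepScheduler, using a
# random tensor in place of a real model's noise prediction.
def _example_unipc_loop():
    import torch
    from diffusers import UniPCMultistepScheduler
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000 , solver_order=2 , solver_type='bh2' )
    scheduler.set_timesteps(10 )
    sample = torch.randn(1 , 3 , 8 , 8 )
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # stand-in for a real UNet's output
        sample = scheduler.step(model_output , t , sample ).prev_sample
    return sample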
| 311 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content(self , **kwargs ):
        return [bytes(CONTENT , "utf-8" )]
def mock_request(*args , **kwargs ):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def test_download_manager_download(urls_type , tmp_path , monkeypatch ):
    """simple docstring"""
    import requests
    monkeypatch.setattr(requests , "request" , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def test_download_manager_extract(paths_type , xz_file , text_file ):
    """simple docstring"""
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path , file ):
    """simple docstring"""
    assert path.endswith(".jsonl" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("utf-8" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def test_iter_archive_path(archive_jsonl , request ):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def test_iter_archive_file(archive_nested_jsonl , request ):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files ):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
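# --- Editor's illustrative sketch (added; not part of the original tests) ---
# Hedged real-world use of the API under test; the URL is a placeholder and
# the calls hit the network when actually run.
#
# from datasets.download.download_manager import DownloadManager
#
# dl_manager = DownloadManager(dataset_name="dummy")
# local_path = dl_manager.download("https://example.com/archive.tar.gz")
# extracted = dl_manager.extract(local_path)  # cached under <cache_dir>/extracted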
| 230 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig ):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
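# --- Editor's illustrative sketch (added; not part of the original module) ---
# Constructing the config with defaults versus overrides; the override values
# below are arbitrary examples.
#
# config = ErnieMConfig()  # defaults: hidden_size=768, num_hidden_layers=12, ...
# small = ErnieMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
# assert small.attribute_map["num_classes"] == "num_labels"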
| 350 | """simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
            'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor , patch_height , patch_width ):
    requires_backends(torch_extract_patches , ['torch'] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
def render_text(text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ) -> Image.Image:
    requires_backends(render_text , 'vision' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '\n'.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , 'Arial.TTF' )
    font = ImageFont.truetype(font , encoding='UTF-8' , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB' , (1, 1) , background_color ) )
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB' , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header(image , header , **kwargs ):
    requires_backends(render_header , 'vision' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor ):
    model_input_names = ["flattened_patches"]
    def __init__(self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2048 , is_vqa = False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self , image , max_patches , patch_size , **kwargs ) -> np.ndarray:
        requires_backends(self.extract_flattened_patches , 'torch' )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image )
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result
    def normalize(self , image , data_format = None , **kwargs ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
    def preprocess(self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('data_format' , None ) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are ' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.' )
            font_bytes = kwargs.pop('font_bytes' , None )
            font_path = kwargs.pop('font_path' , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
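# --- Editor's illustrative sketch (added; not part of the original module) ---
# Hedged end-to-end use of the processor on a random RGB image; the expected
# shape follows from max_patches and the 16x16x3 default patch size, plus the
# two row/column id features prepended per patch.
def _example_pix2struct_preprocess():
    import numpy as np
    processor = Pix2StructImageProcessor(max_patches=512)
    image = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
    encoded = processor.preprocess(image, return_tensors="np")
    assert encoded["flattened_patches"].shape == (1, 512, 2 + 16 * 16 * 3)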
| 312 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 345 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def _A ( ):
"""simple docstring"""
__lowercase =_ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase =get_sagemaker_input()
else:
__lowercase =get_cluster_input()
return config
def _A ( _lowerCAmelCase=None ):
"""simple docstring"""
if subparsers is not None:
__lowercase =subparsers.add_parser('config' , description=_lowerCAmelCase )
else:
__lowercase =argparse.ArgumentParser('Accelerate config command' , description=_lowerCAmelCase )
parser.add_argument(
'--config_file' , default=_lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCAmelCase )
return parser
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =get_user_input()
if args.config_file is not None:
__lowercase =args.config_file
else:
if not os.path.isdir(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
__lowercase =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_lowerCAmelCase )
else:
config.to_yaml_file(_lowerCAmelCase )
print(f"""accelerate configuration saved at {config_file}""" )
def _A ( ):
"""simple docstring"""
__lowercase =config_command_parser()
__lowercase =parser.parse_args()
config_command(_lowerCAmelCase )
if __name__ == "__main__":
main()
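# --- Editor's illustrative note (added) ---
# Typical invocations, assuming the `accelerate` CLI entry point is installed:
#
#   accelerate config                                # interactive prompts
#   accelerate config --config_file ./my_config.yaml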
| 166 | 0 |
from typing import Any
class Node:
    def __init__(self , data ):
        self.data = data
        self.next = None
    def __repr__(self ) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self ):
        self.head = None
    def __iter__(self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self ) -> int:
        return sum(1 for _ in self )
    def __repr__(self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __getitem__(self , index ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__(self , index , data ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail(self , data ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head(self , data ) -> None:
        self.insert_nth(0 , data )
    def insert_nth(self , index , data ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self ) -> None:  # print every node data
        print(self )
    def delete_head(self ):
        return self.delete_nth(0 )
    def delete_tail(self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth(self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self ) -> bool:
        return self.head is None
    def reverse(self ) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
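# Editor's note (illustrative): the list above is singly linked, so
# insert_head is O(1) while insert_tail / insert_nth and delete_nth are O(n)
# walks from the head; reverse() is an in-place O(n) pointer flip, e.g.:
#
#     ll = LinkedList()
#     for x in (1, 2, 3):
#         ll.insert_tail(x)
#     ll.reverse()
#     assert str(ll) == "3->2->1"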
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
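    # Example shell invocation (host, key path and passthrough args here are
    # hypothetical, for illustration only):
    #   python examples/run_on_remote.py --host 12.34.56.78 --user ubuntu \
    #       --key_path ~/.ssh/id_rsa --example pytorch/text-generation/run_generation.py \
    #       --prompt "Hello"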
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
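    # With the linear schedule above, beta ramps from 1e-4 at t=0 to 2e-2 at
    # t=999 over the 1000 training timesteps -- the defaults from the DDPM paper.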
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
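    # The "fixed_small" variance checked above follows the DDPM paper:
    # var(t) = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), which
    # vanishes at t=0 and approaches beta_t (about 0.02) at t=999.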
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
def kth_permutation(k, n):
    """
    Finds the k'th (0-indexed) lexicographic permutation of 0, 1, 2, ..., n - 1
    in O(n^2) time, using the factorial number system.
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
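# Worked example (illustrative call, verified by hand): for n=4 the
# permutations of [0, 1, 2, 3] in lexicographic order put [0, 3, 1, 2] at
# index 4, and indeed kth_permutation(4, 4) == [0, 3, 1, 2].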
if __name__ == "__main__":
import doctest
doctest.testmod()
def solution(limit=1000000):
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # (y + n / y) must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 needs y > d, and n > 0 needs y < 4d

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
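# Derivation behind the loop above: write the decreasing arithmetic
# progression as x = y + d, z = y - d.  Then
#     x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y) = n
# so 4*d = y + n / y.  That is why the code iterates over divisors y of n
# ("first_term"), requires (y + n / y) to be divisible by 4, and keeps only
# d < y < 4*d so that z > 0 and n > 0.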
if __name__ == "__main__":
print(f'{solution() = }')
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MCTCTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
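# These placeholder classes keep imports working when the "speech" extra is
# not installed: instantiating one raises a descriptive ImportError (via
# requires_backends) instead of failing at module import time.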
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
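# Worked example for the iterative branch above: comparing the prediction
# "this is the prediction" against the reference "this is the reference"
# gives one substitution and three hits, so incorrect = 1, total = 4 and
# the resulting WER is 0.25.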
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"
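        # Left padding is required for decoder-only generation: the model
        # continues from the final position of each row, which must hold a
        # real token rather than padding.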
# use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase_ : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase_ : Optional[Any] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
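    # Usage sketch (checkpoint name shown for illustration):
    #   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    #   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
    #                      images=image, return_tensors="pt")
    #   # -> BatchEncoding with "input_ids", "attention_mask" and "pixel_values"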
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
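    # Shape arithmetic for the check above, with this tester's defaults:
    # image_size=32 and patch_size=2 give (32 // 2) ** 2 = 256 patches;
    # len(depths)=3 means two downsampling stages, dividing by 4 ** 2 = 16,
    # so expected_seq_len = 16 and expected_dim = 16 * 2 ** 2 = 64.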
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.hidden_states
__snake_case : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# FocalNet has a different seq_length
__snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case , __snake_case , __snake_case , __snake_case : Dict = reshaped_hidden_states[0].shape
__snake_case : List[str] = (
reshaped_hidden_states[0].view(__magic_name__ , __magic_name__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : Tuple = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : int = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = 3
__snake_case : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__snake_case : List[Any] = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[Any] = FocalNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__magic_name__ )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Optional[int] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _A ( __lowercase , unittest.TestCase ):
lowercase__: int = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__: str = FocalNetConfig
lowercase__: Dict = False
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = FocalNetModelTester(self )
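# What the zero-init check above asserts, in isolation (illustrative sketch, not
# part of the test suite): with all initializer ranges zeroed by _config_zero_init,
# every trainable non-embedding weight should come out exactly 0.0 (zero-filled
# tensors) or 1.0 (LayerNorm scales); the *1e9 round-trip only absorbs float noise.
import torch

for tensor in (torch.zeros(4, 4), torch.ones(4)):
    assert ((tensor.mean() * 1e9).round() / 1e9).item() in [0.0, 1.0]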
| 13 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase )
__snake_case : Optional[int] = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__UpperCamelCase = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
__UpperCamelCase = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
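# A hypothetical follow-up, not part of the original script: DialoGPT shares the
# GPT-2 architecture, so the converted weights can be loaded into a GPT-2 LM head.
# The config source and strict=False below are assumptions, not verified behaviour.
from transformers import GPT2Config, GPT2LMHeadModel


def load_converted(dump_folder: str, config_name: str = "microsoft/DialoGPT-small"):
    config = GPT2Config.from_pretrained(config_name)
    model = GPT2LMHeadModel(config)
    state_dict = torch.load(os.path.join(dump_folder, WEIGHTS_NAME))
    model.load_state_dict(state_dict, strict=False)  # tolerate missing buffers
    return model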
| 13 | 1 |
import requests
from bs4 import BeautifulSoup
def UpperCamelCase( __UpperCamelCase : str = "AAPL" ):
lowerCAmelCase_ : Optional[Any] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
lowerCAmelCase_ : Any = BeautifulSoup(requests.get(__UpperCamelCase ).text ,'''html.parser''' )
lowerCAmelCase_ : int = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' ,class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
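# Yahoo's markup changes frequently, so a defensive variant (sketch) bounds the
# request time and guards each lookup; the CSS class string is carried over from
# above and is not guaranteed to stay valid.
from typing import Optional


def stock_price_safe(symbol: str = "AAPL") -> Optional[str]:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div is not None else None
    return span.text if span is not None else None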
| 103 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Tuple = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __snake_case ( UpperCamelCase_ ):
_a = '''xlm-roberta-xl'''
def __init__( self : int , A_ : List[str]=2_5_0_8_8_0 , A_ : List[str]=2_5_6_0 , A_ : Optional[int]=3_6 , A_ : List[Any]=3_2 , A_ : Optional[int]=1_0_2_4_0 , A_ : Dict="gelu" , A_ : int=0.1 , A_ : Optional[Any]=0.1 , A_ : int=5_1_4 , A_ : Any=1 , A_ : Optional[Any]=0.02 , A_ : str=1e-05 , A_ : Dict=1 , A_ : Any=0 , A_ : Tuple=2 , A_ : str="absolute" , A_ : str=True , A_ : List[str]=None , **A_ : Dict , ):
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_)
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = position_embedding_type
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : List[str] = classifier_dropout
class __snake_case ( UpperCamelCase_ ):
@property
def UpperCAmelCase__ ( self : List[str]):
if self.task == "multiple-choice":
lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase_ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
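# Hypothetical usage: the class names are obfuscated in this dump, so
# XLMRobertaXLConfig below is inferred from the model_type string and assumes a
# transformers release that ships XLM-RoBERTa-XL.
from transformers import XLMRobertaXLConfig

config = XLMRobertaXLConfig(hidden_size=2560, num_hidden_layers=36)
print(config.model_type)  # "xlm-roberta-xl"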
| 103 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : str = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
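# A simplified stand-in for the lazy-import pattern above (sketch, not the actual
# transformers _LazyModule): attribute access triggers the real import on first use.
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so the import only runs once
        return value


lazy_math = _MiniLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(9.0))  # the real ``math`` module is imported only here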
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "roberta-prelayernorm"
def __init__( self : Tuple , __A : Any=5_0_2_6_5 , __A : Optional[int]=7_6_8 , __A : Dict=1_2 , __A : Union[str, Any]=1_2 , __A : List[Any]=3_0_7_2 , __A : Optional[Any]="gelu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[Any]=5_1_2 , __A : List[str]=2 , __A : Optional[int]=0.0_2 , __A : Tuple=1e-1_2 , __A : Any=1 , __A : str=0 , __A : int=2 , __A : List[str]="absolute" , __A : Optional[Any]=True , __A : List[Any]=None , **__A : Optional[Any] , ):
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : Tuple = type_vocab_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : int = layer_norm_eps
snake_case__ : Dict = position_embedding_type
snake_case__ : int = use_cache
snake_case__ : Dict = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : Optional[int] ):
if self.task == "multiple-choice":
snake_case__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case__ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
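# A sketch of how a dynamic-axes mapping like the one above eventually feeds
# torch.onnx.export; model and dummy_inputs are placeholders and the opset
# choice is arbitrary.
import torch


def export_to_onnx(model, dummy_inputs, path="model.onnx"):
    axes = {0: "batch", 1: "sequence"}
    torch.onnx.export(
        model,
        (dummy_inputs["input_ids"], dummy_inputs["attention_mask"]),
        path,
        input_names=["input_ids", "attention_mask"],
        output_names=["logits"],
        dynamic_axes={"input_ids": axes, "attention_mask": axes},
        opset_version=14,
    )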
| 286 | 0 |
def perfect_cube(n: int) -> bool:
    """Return True when non-negative ``n`` is a perfect cube."""
    val = round(n ** (1 / 3))  # round the float root, otherwise 27 fails the equality
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
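# An all-integer alternative (sketch) that sidesteps floating point entirely and
# stays exact for arbitrarily large n:
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        return False
    lo, hi = 0, max(1, n)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


assert perfect_cube_binary_search(27) and not perfect_cube_binary_search(4)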
| 29 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCamelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Dict = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_a )[0]
@deprecated(_a , '''Please use tf.data to implement this functionality.''' )
def lowerCamelCase_ ( _a ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_a ) as bytestream:
lowerCAmelCase__ : Any = _read32(_a )
if magic != 2_051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
lowerCAmelCase__ : Any = _read32(_a )
lowerCAmelCase__ : Tuple = _read32(_a )
lowerCAmelCase__ : List[Any] = _read32(_a )
lowerCAmelCase__ : Union[str, Any] = bytestream.read(rows * cols * num_images )
lowerCAmelCase__ : List[Any] = numpy.frombuffer(_a , dtype=numpy.uint8 )
lowerCAmelCase__ : int = data.reshape(_a , _a , _a , 1 )
return data
@deprecated(_a , '''Please use tf.one_hot on tensors.''' )
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = labels_dense.shape[0]
lowerCAmelCase__ : Optional[Any] = numpy.arange(_a ) * num_classes
lowerCAmelCase__ : str = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase__ : Optional[Any] = 1
return labels_one_hot
@deprecated(_a , '''Please use tf.data to implement this functionality.''' )
def lowerCamelCase_ ( _a , _a=False , _a=10 ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_a ) as bytestream:
lowerCAmelCase__ : Optional[int] = _read32(_a )
if magic != 2_049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
lowerCAmelCase__ : Union[str, Any] = _read32(_a )
lowerCAmelCase__ : Tuple = bytestream.read(_a )
lowerCAmelCase__ : Dict = numpy.frombuffer(_a , dtype=numpy.uint8 )
if one_hot:
return _dense_to_one_hot(_a , _a )
return labels
class _a :
@deprecated(
_SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple=False , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Optional[Any]=dtypes.float32 , _SCREAMING_SNAKE_CASE : List[str]=True , _SCREAMING_SNAKE_CASE : List[str]=None , )-> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = random_seed.get_seed(_SCREAMING_SNAKE_CASE )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase__ : Optional[int] = dtypes.as_dtype(_SCREAMING_SNAKE_CASE ).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
lowerCAmelCase__ : int = 1_0000
lowerCAmelCase__ : List[Any] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
lowerCAmelCase__ : List[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase__ : Tuple = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase__ : Any = images.astype(numpy.float32 )
lowerCAmelCase__ : Any = numpy.multiply(_SCREAMING_SNAKE_CASE , 1.0 / 255.0 )
lowerCAmelCase__ : Tuple = images
lowerCAmelCase__ : Tuple = labels
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = 0
@property
def UpperCAmelCase__( self : Tuple )-> Dict:
return self._images
@property
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
return self._labels
@property
def UpperCAmelCase__( self : Tuple )-> Dict:
return self._num_examples
@property
def UpperCAmelCase__( self : Tuple )-> Any:
return self._epochs_completed
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : Optional[int]=True )-> List[str]:
if fake_data:
lowerCAmelCase__ : Dict = [1] * 784
lowerCAmelCase__ : Union[str, Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_SCREAMING_SNAKE_CASE )],
[fake_label for _ in range(_SCREAMING_SNAKE_CASE )],
)
lowerCAmelCase__ : str = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase__ : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = self.images[perma]
lowerCAmelCase__ : Tuple = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase__ : Any = self._num_examples - start
lowerCAmelCase__ : List[str] = self._images[start : self._num_examples]
lowerCAmelCase__ : Tuple = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase__ : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = self.images[perm]
lowerCAmelCase__ : List[Any] = self.labels[perm]
# Start next epoch
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : Union[str, Any] = batch_size - rest_num_examples
lowerCAmelCase__ : Any = self._index_in_epoch
lowerCAmelCase__ : Optional[Any] = self._images[start:end]
lowerCAmelCase__ : Optional[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase__ : Dict = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_a , '''Please write your own downloading logic.''' )
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
if not gfile.Exists(_a ):
gfile.MakeDirs(_a )
lowerCAmelCase__ : str = os.path.join(_a , _a )
if not gfile.Exists(_a ):
urllib.request.urlretrieve(_a , _a ) # noqa: S310
with gfile.GFile(_a ) as f:
lowerCAmelCase__ : Optional[Any] = f.size()
print('''Successfully downloaded''' , _a , _a , '''bytes.''' )
return filepath
@deprecated(
_a , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def lowerCamelCase_ ( _a , _a=False , _a=False , _a=dtypes.float32 , _a=True , _a=5_000 , _a=None , _a=DEFAULT_SOURCE_URL , ):
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_a , one_hot=_a , dtype=_a , seed=_a )
lowerCAmelCase__ : Tuple = fake()
lowerCAmelCase__ : Union[str, Any] = fake()
lowerCAmelCase__ : Tuple = fake()
return _Datasets(train=_a , validation=_a , test=_a )
if not source_url: # empty string check
lowerCAmelCase__ : Optional[Any] = DEFAULT_SOURCE_URL
lowerCAmelCase__ : Tuple = '''train-images-idx3-ubyte.gz'''
lowerCAmelCase__ : Dict = '''train-labels-idx1-ubyte.gz'''
lowerCAmelCase__ : List[str] = '''t10k-images-idx3-ubyte.gz'''
lowerCAmelCase__ : Optional[int] = '''t10k-labels-idx1-ubyte.gz'''
lowerCAmelCase__ : Optional[Any] = _maybe_download(
_a , _a , source_url + train_images_file )
with gfile.Open(_a , '''rb''' ) as f:
lowerCAmelCase__ : Optional[Any] = _extract_images(_a )
lowerCAmelCase__ : Any = _maybe_download(
_a , _a , source_url + train_labels_file )
with gfile.Open(_a , '''rb''' ) as f:
lowerCAmelCase__ : Any = _extract_labels(_a , one_hot=_a )
lowerCAmelCase__ : Any = _maybe_download(
_a , _a , source_url + test_images_file )
with gfile.Open(_a , '''rb''' ) as f:
lowerCAmelCase__ : str = _extract_images(_a )
lowerCAmelCase__ : Dict = _maybe_download(
_a , _a , source_url + test_labels_file )
with gfile.Open(_a , '''rb''' ) as f:
lowerCAmelCase__ : int = _extract_labels(_a , one_hot=_a )
if not 0 <= validation_size <= len(_a ):
lowerCAmelCase__ : Dict = (
'''Validation size should be between 0 and '''
f'{len(_a )}. Received: {validation_size}.'
)
raise ValueError(_a )
lowerCAmelCase__ : List[str] = train_images[:validation_size]
lowerCAmelCase__ : Any = train_labels[:validation_size]
lowerCAmelCase__ : Optional[Any] = train_images[validation_size:]
lowerCAmelCase__ : Optional[int] = train_labels[validation_size:]
lowerCAmelCase__ : Optional[Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
lowerCAmelCase__ : List[str] = _DataSet(_a , _a , **_a )
lowerCAmelCase__ : Dict = _DataSet(_a , _a , **_a )
lowerCAmelCase__ : Dict = _DataSet(_a , _a , **_a )
return _Datasets(train=_a , validation=_a , test=_a )
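# The readers above parse the IDX format; its header is a handful of big-endian
# uint32 fields. A standalone struct-based parser (sketch) makes the layout explicit:
import struct


def read_idx_header(stream):
    (magic,) = struct.unpack(">I", stream.read(4))
    if magic == 2051:  # image file: count, rows, cols follow
        num, rows, cols = struct.unpack(">III", stream.read(12))
        return ("images", num, rows, cols)
    if magic == 2049:  # label file: only the count follows
        (num,) = struct.unpack(">I", stream.read(4))
        return ("labels", num)
    raise ValueError(f"Unknown IDX magic number: {magic}")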
| 131 | 0 |
import os
import string
import sys
__A = 1 << 8
__A = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
__A = KEYMAP["up"]
__A = KEYMAP["left"]
if sys.platform == "win32":
__A = []
__A = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
KEYMAP[str(i)] = ord(str(i))
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
lowerCamelCase__: Optional[Any] ="mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__a ) == 0:
# Read the keystroke
lowerCamelCase__: Optional[int] =msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCamelCase__: List[str] =ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCamelCase__: Optional[Any] =chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(__a )
if ord(__a ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCamelCase__: List[str] =chr(KEYMAP["esc"] )
except KeyError:
lowerCamelCase__: Any =cha[1]
else:
lowerCamelCase__: Dict =ch.decode(__a )
else:
lowerCamelCase__: Any =WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCamelCase__: int =sys.stdin.fileno()
lowerCamelCase__: Optional[int] =termios.tcgetattr(__a )
try:
tty.setraw(__a )
lowerCamelCase__: int =sys.stdin.read(1 )
finally:
termios.tcsetattr(__a , termios.TCSADRAIN , __a )
return ch
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =get_raw_chars()
if ord(__a ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__a ) == KEYMAP["esc"]:
lowerCamelCase__: List[Any] =get_raw_chars()
if ord(__a ) == KEYMAP["mod_int"]:
lowerCamelCase__: int =get_raw_chars()
if ord(__a ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__a ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__a ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 355 |
def longest_common_subsequence(x: str, y: str):
    """Return the LCS length of x and y together with one witness subsequence."""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest
    doctest.testmod()
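# A top-down cross-check (sketch) of the same LCS recurrence the table implements:
from functools import lru_cache


def lcs_len(x: str, y: str) -> int:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> int:
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return go(i - 1, j - 1) + 1
        return max(go(i - 1, j), go(i, j - 1))

    return go(len(x), len(y))


assert lcs_len("AGGTAB", "GXTXAYB") == 4  # length of "GTAB"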
| 273 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 249 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
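# The same encode -> forward -> decode flow as the tool above, as one standalone
# function (sketch; downloads the checkpoint on first call, and `audio` must be a
# waveform the processor accepts):
def transcribe(audio):
    processor = WhisperProcessor.from_pretrained("openai/whisper-base")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
    features = processor(audio, return_tensors="pt").input_features
    generated_ids = model.generate(inputs=features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]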
| 38 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
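# Quick sanity check for freeze_params (illustrative):
import torch.nn as nn

layer = nn.Linear(4, 4)
freeze_params(layer)
assert all(not p.requires_grad for p in layer.parameters())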
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..t5.tokenization_t5 import T5Tokenizer
else:
from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
lowerCAmelCase__ = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mt5 import (
MT5EncoderModel,
MT5ForConditionalGeneration,
MT5ForQuestionAnswering,
MT5Model,
MT5PreTrainedModel,
MT5Stack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
lowerCAmelCase__ = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
module_spec=__spec__,
)
| 119 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str]=13 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : Optional[int]=16 , lowerCAmelCase__ : Optional[int]=[32, 64, 128] , lowerCAmelCase__ : Union[str, Any]=[1, 2, 1] , lowerCAmelCase__ : Optional[int]=[2, 2, 4] , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Dict=2.0 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : List[str]=1E-5 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=10 , lowerCAmelCase__ : Optional[int]=8 , lowerCAmelCase__ : List[Any]=["stage1", "stage2"] , lowerCAmelCase__ : str=[1, 2] , ):
SCREAMING_SNAKE_CASE_: Tuple = parent
SCREAMING_SNAKE_CASE_: str = batch_size
SCREAMING_SNAKE_CASE_: int = image_size
SCREAMING_SNAKE_CASE_: str = patch_size
SCREAMING_SNAKE_CASE_: Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_: str = embed_dim
SCREAMING_SNAKE_CASE_: Optional[int] = hidden_sizes
SCREAMING_SNAKE_CASE_: str = depths
SCREAMING_SNAKE_CASE_: Optional[int] = num_heads
SCREAMING_SNAKE_CASE_: Optional[Any] = window_size
SCREAMING_SNAKE_CASE_: int = mlp_ratio
SCREAMING_SNAKE_CASE_: Any = qkv_bias
SCREAMING_SNAKE_CASE_: List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE_: Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_: Dict = use_absolute_embeddings
SCREAMING_SNAKE_CASE_: Any = patch_norm
SCREAMING_SNAKE_CASE_: str = layer_norm_eps
SCREAMING_SNAKE_CASE_: Any = initializer_range
SCREAMING_SNAKE_CASE_: List[str] = is_training
SCREAMING_SNAKE_CASE_: Dict = scope
SCREAMING_SNAKE_CASE_: Any = use_labels
SCREAMING_SNAKE_CASE_: List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[int] = encoder_stride
SCREAMING_SNAKE_CASE_: int = out_features
SCREAMING_SNAKE_CASE_: str = out_indices
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = FocalNetModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
SCREAMING_SNAKE_CASE_: Optional[int] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: str = FocalNetBackbone(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE_: Optional[Any] = None
SCREAMING_SNAKE_CASE_: Optional[int] = FocalNetBackbone(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
SCREAMING_SNAKE_CASE_: Any = FocalNetForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: str = FocalNetForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
SCREAMING_SNAKE_CASE_: Optional[Any] = FocalNetForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = config_and_inputs
SCREAMING_SNAKE_CASE_: Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Dict = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : int = False
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[int] = False
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = FocalNetModelTester(self)
SCREAMING_SNAKE_CASE_: str = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : int):
return
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@unittest.skip(reason="FocalNet does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE_: Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
# FocalNet has a different seq_length
SCREAMING_SNAKE_CASE_: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE_: List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE_: Dict = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE_: List[Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE_: List[str] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE_: Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE_: List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE_: List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
SCREAMING_SNAKE_CASE_: Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width))
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: str = FocalNetModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: int = _config_zero_init(lowerCAmelCase__)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(config=lowerCAmelCase__)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
SCREAMING_SNAKE_CASE_: Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([0.2166, -0.4368, 0.2191]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = (FocalNetBackbone,) if is_torch_available() else ()
_UpperCAmelCase : int = FocalNetConfig
_UpperCAmelCase : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = FocalNetModelTester(self)
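# Worked numbers for the shape assertions above, using the tester defaults
# (image 32, patch 2, embed_dim 16, depths [1, 2, 1]):
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2               # 256 patches at stage 0
expected_seq_len = num_patches // (4 ** (len(depths) - 1))  # 16 after two downsamplings
expected_dim = embed_dim * 2 ** (len(depths) - 1)           # 64
print(expected_seq_len, expected_dim)  # 16 64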
| 13 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_UpperCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
_UpperCAmelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , lowerCAmelCase__):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self)
SCREAMING_SNAKE_CASE_: Optional[int] = self.input_schema.copy()
SCREAMING_SNAKE_CASE_: Dict = features[self.audio_column]
SCREAMING_SNAKE_CASE_: int = input_schema
return task_template
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 13 | 1 |
lowerCAmelCase__ :Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase__ :Optional[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase__ :str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 356 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCAmelCase__ :Tuple = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowerCAmelCase__ :Optional[int] = parser.parse_args()
if args.model_type == "bert":
lowerCAmelCase__ :Tuple = BertForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase__ :Optional[int] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowerCAmelCase__ :Any = model.state_dict()
lowerCAmelCase__ :Dict = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase__ :List[Any] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCAmelCase__ :Union[str, Any] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCAmelCase__ :str = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
lowerCAmelCase__ :Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCAmelCase__ :List[str] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCAmelCase__ :int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCAmelCase__ :Optional[int] = state_dict['''cls.predictions.decoder.weight''']
lowerCAmelCase__ :List[str] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ :Any = state_dict[f'''cls.predictions.transform.dense.{w}''']
lowerCAmelCase__ :List[str] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
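# The layer selection above in one mapping (worked example): teacher layers
# [0, 2, 4, 7, 9, 11] are renumbered consecutively in the student.
teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
print(teacher_to_student)  # {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}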
| 185 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Max-pool a square matrix with a size x size window moved by stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def _lowerCAmelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.array(_UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
# compute the shape of the output matrix
_SCREAMING_SNAKE_CASE =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
_SCREAMING_SNAKE_CASE =np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
_SCREAMING_SNAKE_CASE =int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
lowerCamelCase : Optional[Any] = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
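
# --- Illustrative check (not part of the original module) ---
# Pooling a 4x4 matrix with size=2 and stride=2; the expected values follow
# directly from the max/average of each non-overlapping 2x2 window.
if __name__ == "__main__":
    demo = np.arange(1, 17).reshape(4, 4)
    print(maxpooling(demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]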
| 47 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
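
# --- Illustrative usage sketch (not part of the original module) ---
# The parameter values below are arbitrary assumptions for demonstration;
# the guard keeps this inert when the module is imported as a library.
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, num_time_features=2)
    print(config.hidden_size, config.attention_type)  # 64 (via attribute_map), "prob"
    print(config.feature_size)  # input_size * len(lags_sequence) + _number_of_features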
| 286 | 0 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition for k exists when 4**x = 2**x + k has a real solution x;
    the partition is perfect when that x is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the smallest partition value at which the proportion of perfect
    partitions first drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| 355 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
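
# --- Illustrative usage sketch (not part of the original module) ---
# The tokenizer checkpoint and data directory are placeholder assumptions;
# `data_dir` must contain the SQuAD train/dev .json files for this to run.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
    train_dataset = SquadDataset(train_args, tokenizer, mode=Split.train)
    print(len(train_dataset), train_dataset[0].keys())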
| 41 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
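
# --- Illustrative standalone sketch (not part of the test suite) ---
# Running the pretrained TF ResNet checkpoint on a random tensor just to see
# the output shape; downloading the checkpoint is assumed to succeed.
if __name__ == "__main__":
    model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    logits = model(tf.random.uniform((1, 3, 224, 224))).logits
    print(logits.shape)  # (1, 1000)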
| 86 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 273 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # target keys follow GroupViT's separate q/k/v projection naming
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
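
# --- Illustrative follow-up sketch (not part of the conversion script) ---
# Zero-shot scoring with the converted checkpoint; the local path is a placeholder.
if __name__ == "__main__":
    converted = GroupViTModel.from_pretrained("path/to/converted")  # placeholder path
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=prepare_img(), padding=True, return_tensors="pt"
    )
    print(converted(**inputs).logits_per_image.softmax(dim=1))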
| 285 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
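
# --- Illustrative usage sketch (not part of the original module) ---
# The depth multiplier and image size values are arbitrary assumptions.
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    print(config.hidden_act, config.num_channels)  # relu6 3
    onnx_config = MobileNetV1OnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch'}}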
| 285 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
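
# --- Illustrative standalone sketch (not part of the test suite) ---
# A single translation with one of the checkpoints exercised above; assumes
# the model can be downloaded.
if __name__ == "__main__":
    mname = "facebook/wmt19-en-de"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    batch = tokenizer(["Machine learning is great, isn't it?"], return_tensors="pt")
    generated = model.generate(**batch, num_beams=4)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))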
| 15 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized: two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 119 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 39 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 39 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Returns the Liouville function λ(n): 1 if n has an even number of prime
    factors (counted with multiplicity), -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
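
# --- Illustrative check (not part of the original module) ---
# 10 = 2 * 5 has two prime factors -> 1; 11 is prime (one factor) -> -1.
if __name__ == "__main__":
    print(liouville_lambda(10), liouville_lambda(11))  # 1 -1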
| 84 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate back to the source image."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
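
# --- Illustrative check (not part of the original module) ---
# Upscaling a 2x2 image to 4x4 duplicates each source pixel into a 2x2 block.
if __name__ == "__main__":
    tiny = np.arange(4, dtype=np.uint8).reshape(2, 2, 1).repeat(3, axis=2)
    nn_demo = NearestNeighbour(tiny, 4, 4)
    nn_demo.process()
    print(nn_demo.output[:, :, 0])  # [[0 0 1 1] [0 0 1 1] [2 2 3 3] [2 2 3 3]]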
| 185 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
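
# --- Illustrative usage sketch (not part of the original module) ---
# End-to-end use of this formatter via the public `datasets` API; requires
# `datasets` and `jax` to be installed.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
    print(type(ds[0]["x"]), ds[:2]["x"].shape)  # a jax.Array of shape (2, 2)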
| 256 |
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 256 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
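
# --- Illustrative check (not part of the original solution) ---
# 144 is the first Fibonacci number with three digits, at index 12.
if __name__ == "__main__":
    print(fibonacci(12))  # 144
    print(fibonacci_digits_index(3))  # 12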
| 50 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Any = ViTMAEModelTester(self )
UpperCamelCase_ : Tuple = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Any = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[str] = model_class(snake_case )
UpperCamelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCamelCase_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[str] , snake_case : Optional[int] , snake_case : Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase_ : int = torch.from_numpy(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase_ : Tuple = pt_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : str = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCamelCase_ : Any = outputs[0].cpu().numpy()
UpperCamelCase_ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Union[str, Any] = model_class.from_pretrained(snake_case )
model.to(snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
# Make sure we don't have nans
                UpperCamelCase_ : Optional[Any] = after_outputs[0].cpu().numpy()  # compared below as `out_b`
                UpperCamelCase_ : Union[str, Any] = 0
                UpperCamelCase_ : int = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(snake_case , 1e-5 )
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Dict = ViTMAEModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __lowercase ( ):
UpperCamelCase_ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : List[str] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
UpperCamelCase_ : Tuple = self.default_image_processor
UpperCamelCase_ : Union[str, Any] = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase_ : int = ViTMAEConfig()
UpperCamelCase_ : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase_ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase_ : str = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
# verify the logits
UpperCamelCase_ : Dict = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCamelCase_ : Union[str, Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
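

# Worked example (an illustrative addition): how the tester above derives the
# expected sequence length from the mask ratio. ViTMAE keeps only the unmasked
# patches plus the [CLS] token, rounding up.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                       # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))   # 91
print(num_patches, seq_length)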
| 50 | 1 |
import os
import sys
import unittest
_UpperCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCAmelCase : Dict = os.path.join(git_repo_path, """src""", """diffusers""")
class lowercase ( unittest.TestCase ):
def a ( self ):
snake_case_ = find_backend(' if not is_torch_available():' )
self.assertEqual(snake_case , 'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case_ = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(snake_case , 'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case_ = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(snake_case , 'torch_and_transformers_and_onnx' )
def a ( self ):
snake_case_ = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , snake_case )
self.assertIn('torch_and_transformers' , snake_case )
self.assertIn('flax_and_transformers' , snake_case )
self.assertIn('torch_and_transformers_and_onnx' , snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def a ( self ):
snake_case_ = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(snake_case , '\nCONSTANT = None\n' )
snake_case_ = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
snake_case , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case_ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
snake_case_ = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(snake_case , snake_case )
def a ( self ):
snake_case_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
snake_case_ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , snake_case )
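

# Minimal sketch (an assumption: simplified, not the actual
# `check_dummies.find_backend`) of the pattern the tests above exercise:
# pull backend names out of `if not is_xxx_available():` guards and join
# multiple backends with "_and_".
import re

_re_backend_sketch = re.compile(r"is_([a-z_]*)_available\(\)")


def find_backend_sketch(line: str):
    backends = _re_backend_sketch.findall(line)
    return "_and_".join(backends) if backends else None


assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert (
    find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)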
| 285 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer''']
def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ):
super().__init__(
feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , )
snake_case_ = top_db
snake_case_ = truncation
snake_case_ = padding
snake_case_ = fft_window_size
snake_case_ = (fft_window_size >> 1) + 1
snake_case_ = hop_length
snake_case_ = max_length_s
snake_case_ = max_length_s * sampling_rate
snake_case_ = sampling_rate
snake_case_ = frequency_min
snake_case_ = frequency_max
snake_case_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , )
snake_case_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , )
def a ( self ):
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def a ( self , snake_case , snake_case = None ):
snake_case_ = spectrogram(
snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , )
return log_mel_spectrogram.T
def a ( self , snake_case , snake_case , snake_case ):
snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ = [0]
# randomly choose index for each part
snake_case_ = np.random.choice(ranges[0] )
snake_case_ = np.random.choice(ranges[1] )
snake_case_ = np.random.choice(ranges[2] )
snake_case_ = mel[idx_front : idx_front + chunk_frames, :]
snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :]
snake_case_ = mel[idx_back : idx_back + chunk_frames, :]
snake_case_ = torch.tensor(mel[None, None, :] )
snake_case_ = torch.nn.functional.interpolate(
snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case )
snake_case_ = mel_shrink[0][0].numpy()
snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def a ( self , snake_case , snake_case , snake_case , snake_case ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case_ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case_ = len(snake_case ) - max_length
snake_case_ = np.random.randint(0 , overflow + 1 )
snake_case_ = waveform[idx : idx + max_length]
snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters )
                snake_case_ = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
snake_case_ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case_ = False
else:
snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case )
snake_case_ = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
snake_case_ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case_ = int(max_length / len(snake_case ) )
snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case_ = int(max_length / len(snake_case ) )
snake_case_ = np.stack(np.tile(snake_case , snake_case ) )
snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters )
snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ):
snake_case_ = truncation if truncation is not None else self.truncation
snake_case_ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
snake_case_ = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
snake_case_ = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ = [np.asarray(snake_case )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case_ = [
self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case )
for waveform in raw_speech
]
snake_case_ = []
snake_case_ = []
for mel, longer in padded_inputs:
input_mel.append(snake_case )
is_longer.append(snake_case )
if truncation == "fusion" and sum(snake_case ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case_ = np.random.randint(0 , len(snake_case ) )
snake_case_ = True
if isinstance(input_mel[0] , snake_case ):
snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case_ = [[longer] for longer in is_longer]
snake_case_ = {'input_features': input_mel, 'is_longer': is_longer}
snake_case_ = BatchFeature(snake_case )
if return_tensors is not None:
snake_case_ = input_features.convert_to_tensors(snake_case )
return input_features
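

# Illustrative usage (assumptions: upstream this class is
# `transformers.ClapFeatureExtractor`, and the checkpoint name below is an
# example, not taken from this file): extracting fused mel features from a
# raw waveform.
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
waveform = np.random.randn(48_000 * 3).astype(np.float32)  # 3 s of fake mono audio
inputs = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(inputs["input_features"].shape, inputs["is_longer"])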
| 285 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = XCLIPTextConfig()
# derive patch size from model name
__lowerCAmelCase = model_name.find('''patch''')
__lowerCAmelCase = int(model_name[start_idx + len('''patch''') : start_idx + len('''patch''') + 2])
__lowerCAmelCase = XCLIPVisionConfig(patch_size=lowerCamelCase, num_frames=lowerCamelCase)
if "large" in model_name:
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = 3_0_7_2
__lowerCAmelCase = 1_2
__lowerCAmelCase = 1_0_2_4
__lowerCAmelCase = 4_0_9_6
__lowerCAmelCase = 1_6
__lowerCAmelCase = 2_4
__lowerCAmelCase = 7_6_8
__lowerCAmelCase = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__lowerCAmelCase = 3_3_6
__lowerCAmelCase = XCLIPConfig.from_text_vision_configs(lowerCamelCase, lowerCamelCase)
if "large" in model_name:
__lowerCAmelCase = 7_6_8
return config
def __magic_name__( lowerCamelCase):
# text encoder
if name == "token_embedding.weight":
__lowerCAmelCase = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''')
if name == "positional_embedding":
__lowerCAmelCase = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''')
if "ln_1" in name:
__lowerCAmelCase = name.replace('''ln_1''', '''layer_norm1''')
if "ln_2" in name:
__lowerCAmelCase = name.replace('''ln_2''', '''layer_norm2''')
if "c_fc" in name:
__lowerCAmelCase = name.replace('''c_fc''', '''fc1''')
if "c_proj" in name:
__lowerCAmelCase = name.replace('''c_proj''', '''fc2''')
if name.startswith('''transformer.resblocks'''):
__lowerCAmelCase = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''')
if "attn.out_proj" in name and "message" not in name:
__lowerCAmelCase = name.replace('''attn.out_proj''', '''self_attn.out_proj''')
if "ln_final" in name:
__lowerCAmelCase = name.replace('''ln_final''', '''text_model.final_layer_norm''')
# visual encoder
if name == "visual.class_embedding":
__lowerCAmelCase = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''')
if name == "visual.positional_embedding":
__lowerCAmelCase = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''')
if name.startswith('''visual.transformer.resblocks'''):
__lowerCAmelCase = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''')
if "visual.conv1" in name:
__lowerCAmelCase = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''')
if "visual.ln_pre" in name:
__lowerCAmelCase = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''')
if "visual.ln_post" in name:
__lowerCAmelCase = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''')
if "visual.proj" in name:
__lowerCAmelCase = name.replace('''visual.proj''', '''visual_projection.weight''')
if "text_projection" in name:
__lowerCAmelCase = name.replace('''text_projection''', '''text_projection.weight''')
# things on top
if "prompts_visual_proj" in name:
__lowerCAmelCase = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''')
if "prompts_visual_ln" in name:
__lowerCAmelCase = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''')
# mit
if name == "mit.positional_embedding":
__lowerCAmelCase = name.replace('''positional''', '''position''')
if name.startswith('''mit.resblocks'''):
__lowerCAmelCase = name.replace('''mit.resblocks''', '''mit.encoder.layers''')
# prompts generator
if name.startswith('''prompts_generator.norm'''):
__lowerCAmelCase = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''')
return name
def __magic_name__( lowerCamelCase, lowerCamelCase):
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(lowerCamelCase)
if "attn.in_proj" in key:
__lowerCAmelCase = key.split('''.''')
if key.startswith('''visual'''):
__lowerCAmelCase = key_split[3]
__lowerCAmelCase = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__lowerCAmelCase = val[
:dim, :
]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[
-dim:, :
]
else:
__lowerCAmelCase = val[
:dim
]
__lowerCAmelCase = val[
dim : dim * 2
]
__lowerCAmelCase = val[
-dim:
]
else:
if "weight" in key:
__lowerCAmelCase = val[
:dim, :
]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[
-dim:, :
]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[
dim : dim * 2
]
__lowerCAmelCase = val[-dim:]
elif key.startswith('''mit'''):
__lowerCAmelCase = key_split[2]
__lowerCAmelCase = config.vision_config.mit_hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[dim : dim * 2]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = key_split[2]
__lowerCAmelCase = config.text_config.hidden_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[:dim]
__lowerCAmelCase = val[
dim : dim * 2
]
__lowerCAmelCase = val[-dim:]
else:
__lowerCAmelCase = rename_key(lowerCamelCase)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__lowerCAmelCase = val.T
__lowerCAmelCase = val
return orig_state_dict
def __magic_name__( lowerCamelCase):
if num_frames == 8:
__lowerCAmelCase = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__lowerCAmelCase = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__lowerCAmelCase = '''eating_spaghetti_32_frames.npy'''
__lowerCAmelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=lowerCamelCase, repo_type='''dataset''', )
__lowerCAmelCase = np.load(lowerCamelCase)
return list(lowerCamelCase)
def __magic_name__( lowerCamelCase, lowerCamelCase=None, lowerCamelCase=False):
__lowerCAmelCase = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__lowerCAmelCase = model_to_url[model_name]
__lowerCAmelCase = 8
if "16-frames" in model_name:
__lowerCAmelCase = 1_6
elif "shot" in model_name:
__lowerCAmelCase = 3_2
__lowerCAmelCase = get_xclip_config(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = XCLIPModel(lowerCamelCase)
model.eval()
if "drive" in checkpoint_url:
__lowerCAmelCase = '''pytorch_model.bin'''
gdown.cached_download(lowerCamelCase, lowerCamelCase, quiet=lowerCamelCase)
__lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model''']
else:
__lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase)['''model''']
__lowerCAmelCase = convert_state_dict(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = XCLIPModel(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__lowerCAmelCase = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__lowerCAmelCase = VideoMAEImageProcessor(size=lowerCamelCase)
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''')
__lowerCAmelCase = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''')
__lowerCAmelCase = XCLIPProcessor(image_processor=lowerCamelCase, tokenizer=lowerCamelCase)
__lowerCAmelCase = prepare_video(lowerCamelCase)
__lowerCAmelCase = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=lowerCamelCase, return_tensors='''pt''', padding=lowerCamelCase)
print('''Shape of pixel values:''', inputs.pixel_values.shape)
with torch.no_grad():
__lowerCAmelCase = model(**lowerCamelCase)
# Verify outputs
__lowerCAmelCase = outputs.logits_per_video
__lowerCAmelCase = logits_per_video.softmax(dim=1)
print('''Probs:''', lowerCamelCase)
# kinetics-400
if model_name == "xclip-base-patch32":
__lowerCAmelCase = torch.tensor([[0.00_19, 0.99_51, 0.00_30]])
elif model_name == "xclip-base-patch32-16-frames":
__lowerCAmelCase = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]])
elif model_name == "xclip-base-patch16":
__lowerCAmelCase = torch.tensor([[0.00_83, 0.96_81, 0.02_36]])
elif model_name == "xclip-base-patch16-16-frames":
__lowerCAmelCase = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]])
elif model_name == "xclip-large-patch14":
__lowerCAmelCase = torch.tensor([[0.00_62, 0.98_64, 0.00_75]])
elif model_name == "xclip-large-patch14-16-frames":
__lowerCAmelCase = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]])
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__lowerCAmelCase = torch.tensor([[0.05_55, 0.89_14, 0.05_31]])
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__lowerCAmelCase = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]])
elif model_name == "xclip-large-patch14-kinetics-600":
__lowerCAmelCase = torch.tensor([[0.00_36, 0.99_20, 0.00_45]])
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__lowerCAmelCase = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]])
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__lowerCAmelCase = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]])
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__lowerCAmelCase = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]])
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__lowerCAmelCase = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]])
elif model_name == "xclip-base-patch16-ucf-2-shot":
__lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]])
elif model_name == "xclip-base-patch16-ucf-4-shot":
__lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]])
elif model_name == "xclip-base-patch16-ucf-8-shot":
__lowerCAmelCase = torch.tensor([[0.00_27, 0.99_04, 0.00_70]])
elif model_name == "xclip-base-patch16-ucf-16-shot":
__lowerCAmelCase = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]])
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__lowerCAmelCase = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]])
else:
raise ValueError(F"""Model name {model_name} not supported""")
assert torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(lowerCamelCase)
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''')
model.push_to_hub(lowerCamelCase, organization='''nielsr''')
processor.push_to_hub(lowerCamelCase, organization='''nielsr''')
slow_tokenizer.push_to_hub(lowerCamelCase, organization='''nielsr''')
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
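

# Standalone illustration (an addition, not part of the conversion script):
# the q/k/v slicing in `convert_state_dict` assumes a fused `in_proj` weight
# laid out as [query; key; value] along dim 0, which is how
# torch.nn.MultiheadAttention stores it.
import torch

dim = 4
in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)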
| 9 |
'''simple docstring'''
# Imports
import numpy as np
class a__ :
"""simple docstring"""
def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
if red is not None:
__lowerCAmelCase = red
if green is not None:
__lowerCAmelCase = green
if blue is not None:
__lowerCAmelCase = blue
if red_edge is not None:
__lowerCAmelCase = red_edge
if nir is not None:
__lowerCAmelCase = nir
return True
def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ):
self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase )
__lowerCAmelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def _snake_case (self ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case (self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case (self ):
return self.nir * (self.red / (self.green**2))
def _snake_case (self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case (self ):
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case (self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case (self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case (self ):
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case (self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case (self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case (self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case (self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case (self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case (self ):
return (self.nir / self.green) - 1
def _snake_case (self ):
return (self.nir / self.redEdge) - 1
def _snake_case (self ):
return (self.red - self.blue) / self.red
def _snake_case (self ):
__lowerCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case (self ):
return self.nir - self.green
def _snake_case (self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case (self ):
__lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case (self , __lowercase=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case (self , __lowercase=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case (self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case (self , __lowercase=None , __lowercase=None ):
return (self.nir - b) / (a * self.red)
def _snake_case (self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case (self ):
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case (self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case (self ):
return self.green / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.nir / (self.nir + self.red + self.green)
def _snake_case (self ):
return self.red / (self.nir + self.red + self.green)
def _snake_case (self ):
return (self.green - self.red) / (self.green + self.red)
def _snake_case (self ):
return (self.red - self.green) / (self.red + self.green)
def _snake_case (self ):
__lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case (self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case (self ):
return self.nir / self.red
def _snake_case (self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case (self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
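

# Usage sketch (an illustrative addition; band values are fabricated): the
# indices above operate elementwise on per-band numpy arrays, e.g. NDVI for a
# 2x2 tile:
import numpy as np

red = np.array([[0.20, 0.30], [0.25, 0.40]])
nir = np.array([[0.60, 0.70], [0.65, 0.80]])
ndvi = (nir - red) / (nir + red)  # same formula as the ndvi() method above
print(ndvi)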
| 9 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=UpperCAmelCase , )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_a = 2
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , *, # begin keyword-only arguments
UpperCAmelCase="<s>" , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = bos, unk, pad, eos
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = self.add_symbol(UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(UpperCAmelCase )
_UpperCAmelCase = len(self.symbols )
def __eq__( self , UpperCAmelCase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , UpperCAmelCase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = cls()
d.add_from_file(UpperCAmelCase )
return d
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
_UpperCAmelCase = self.indices[word]
_UpperCAmelCase = self.count[idx] + n
return idx
else:
_UpperCAmelCase = len(self.symbols )
_UpperCAmelCase = idx
self.symbols.append(UpperCAmelCase )
self.count.append(UpperCAmelCase )
return idx
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
return 0
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(UpperCAmelCase ) )
return
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = self._load_meta(UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
_UpperCAmelCase , _UpperCAmelCase = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_UpperCAmelCase = True
_UpperCAmelCase , _UpperCAmelCase = line.rsplit(' ' , 1 )
else:
_UpperCAmelCase = False
_UpperCAmelCase = int(UpperCAmelCase )
_UpperCAmelCase = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(UpperCAmelCase ) )
self.add_symbol(UpperCAmelCase , n=UpperCAmelCase , overwrite=UpperCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
_UpperCAmelCase = dict((re.sub(R'@@$' , '' , __lowerCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , __lowerCAmelCase ), v) for k, v in d.items() )
_UpperCAmelCase = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
_UpperCAmelCase = d[k] # restore
return da
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> str:
"""simple docstring"""
if not os.path.exists(__lowerCAmelCase ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'checkpoint.pt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
_UpperCAmelCase = torch.load(__lowerCAmelCase , map_location='cpu' )
_UpperCAmelCase = chkpt['cfg']['model']
# dicts
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'dict.txt' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
_UpperCAmelCase = Dictionary.load(__lowerCAmelCase )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'bpecodes' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'config.json' )
_UpperCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
_UpperCAmelCase = chkpt['model']
# remove unneeded keys
_UpperCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
else:
_UpperCAmelCase = model_state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('Conversion is done!' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
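

# Worked example (an illustrative addition) of the key rewriting done by
# `rewrite_dict_keys` above: fairseq BPE continuation markers ("@@") are
# stripped, every word-final token gains a "</w>" suffix, and the special
# tokens are then restored verbatim.
import re

vocab = {"<s>": 0, "hel@@": 10, "lo": 11}
rewritten = dict(
    (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
    for k, v in vocab.items()
)
print(rewritten)  # {'<s></w>': 0, 'hel': 10, 'lo</w>': 11}; '<s></w>' is then mapped back to '<s>'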
| 39 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ,"""r""" ) )
_SCREAMING_SNAKE_CASE = {int(snake_case__ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_SCREAMING_SNAKE_CASE = BitConfig(
conv_layer=snake_case__ ,num_labels=10_00 ,idalabel=snake_case__ ,labelaid=snake_case__ ,)
return config
def __lowerCamelCase ( snake_case__ ) -> Optional[int]:
"""simple docstring"""
if "stem.conv" in name:
_SCREAMING_SNAKE_CASE = name.replace("""stem.conv""" ,"""bit.embedder.convolution""" )
if "blocks" in name:
_SCREAMING_SNAKE_CASE = name.replace("""blocks""" ,"""layers""" )
if "head.fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""head.fc""" ,"""classifier.1""" )
if name.startswith("""norm""" ):
_SCREAMING_SNAKE_CASE = """bit.""" + name
if "bit" not in name and "classifier" not in name:
_SCREAMING_SNAKE_CASE = """bit.encoder.""" + name
return name
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(snake_case__ ,stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 125 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
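# Usage sketch (added for illustration; the subclass below is hypothetical):
# subclasses declare PREFIX/DEFAULTS, then shortname() and parse_repr() round-trip
# a hyperparameter dict through a compact run name.
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})  # -> "run_lr0.0001"
#   RunNamer.parse_repr("run_lr0.0001")  # -> {"learning_rate": 0.0001, "batch_size": 8}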
| 125 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)

        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 256 | """simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
UpperCAmelCase = {"""bert_for_seq_generation""": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 256 | 1 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
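# Usage sketch (added for illustration): the wrapper answers stage/offload queries
# against the nested DeepSpeed JSON without the caller walking the dict by hand.
#
#   ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds_config.is_zero3()                                # -> True
#   ds_config.is_offload()                              # -> True
#   ds_config.get_value("zero_optimization.stage")      # -> 3
#   ds_config.get_value("train_batch_size", default=8)  # -> 8 (key absent)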
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 353 | from ..utils import DummyObject, requires_backends
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
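# Behavior sketch (added for illustration): with the DummyObject metaclass, touching any
# of these placeholders (instantiation, from_config, from_pretrained, or attribute access)
# calls requires_backends, which raises an ImportError naming the missing backends
# (torch, transformers, onnx) instead of an opaque AttributeError later on.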
| 197 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 50 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 195 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    :param dataset: set containing the vectors
    :param value_array: vector(s) for which we want the nearest vector from dataset
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
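# Usage sketch (added for illustration):
#
#   dataset = np.array([[0, 0], [1, 1], [2, 2]])
#   value_array = np.array([[0, 1]])
#   similarity_search(dataset, value_array)
#   # -> [[[0, 0], 1.0]] : nearest vector and its euclidean distance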
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # each replacement must be assigned back to `name` so the renames chain
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
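# Worked illustration (added; dim = 768 is an assumed base-size hidden width):
# a fused attn.in_proj weight of shape (3 * dim, dim) is split row-wise into equal
# q/k/v blocks before being re-keyed:
#
#   val[:768, :]      -> ...q_proj.weight
#   val[768:1536, :]  -> ...k_proj.weight
#   val[-768:, :]     -> ...v_proj.weight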
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
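# A minimal, hedged sketch of the pattern these tests exercise (assumes a prepared
# `model`/`eval_dataloader`; not part of the original script):
#
#   accelerator = Accelerator()
#   model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
#   for batch in eval_dataloader:
#       with torch.no_grad():
#           logits = model(batch["input"])
#       # gather_for_metrics drops the duplicate samples added for even sharding
#       logits, targets = accelerator.gather_for_metrics((logits, batch["target"]))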
| 9 | 1 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    # Sorts sequence[start..end] (both inclusive) in place; start defaults to 0,
    # end defaults to len(sequence) - 1.
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
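# Quick usage sketch for the slowsort above (in-place, returns None):
#   data = [5, 2, 9, 1]
#   slowsort(data)
#   print(data)  # [1, 2, 5, 9]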
| 367 | """simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # Dispatch to multiprocessing by default, or to joblib when a backend is configured
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    # Configure the joblib backend used by parallel_map; reset it on exit
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
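# Hedged usage sketch for the parallel_backend context manager above; `ds.map`
# and the registered "spark" backend are assumptions about the caller's setup:
#   with parallel_backend("spark"):
#       ds = ds.map(tokenize_fn, num_proc=2)  # work is routed through joblib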
| 268 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two implicants that differ in at most one bit; return False otherwise
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # Repeatedly merge implicants until no merge is possible; unmerged entries are prime implicants
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    # True if the two strings differ in exactly `count` positions
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
                    temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    # chart[i][j] == 1 iff prime implicant i covers minterm j
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
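# A small worked example for the functions above (values are illustrative; run
# the module to verify):
#   binary = decimal_to_binary(3, [1, 4, 5, 6, 7])   # ['001', '100', '101', '110', '111']
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   print(selection(chart, prime_implicants))        # essential prime implicants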
| 125 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
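# Hedged usage sketch (the standard transformers config pattern):
#   from transformers import RoCBertConfig, RoCBertModel
#   configuration = RoCBertConfig()          # defaults match weiweishi/roc-bert-base-zh
#   model = RoCBertModel(configuration)      # randomly initialised weights
#   configuration = model.config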
| 125 | 1 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
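# Hedged usage sketch for this scheduler, following the generic diffusers
# denoising loop; `unet` and the initial `sample` are placeholders:
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
 | 31 |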
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class name was lost upstream; `TorchScipyDummyObject` is a placeholder.
class TorchScipyDummyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
 | 31 | 1 |
from ..utils import DummyObject, requires_backends
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Tuple = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> int:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> List[str]:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> Dict:
requires_backends(lowerCamelCase_ , ['torch'] )
def UpperCamelCase_( *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]:
requires_backends(lowerCamelCase_ , ['torch'] )
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[int] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Tuple = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : str = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Union[str, Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Any = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : int = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Dict = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : Optional[Any] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['torch'])
class _lowerCamelCase( metaclass=_a ):
lowercase_ : List[str] = ["""torch"""]
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> List[str]:
"""simple docstring"""
requires_backends(self, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['torch'])
@classmethod
def UpperCamelCase ( cls, *lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
requires_backends(cls, ['torch'])
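# All of the classes above follow the same "dummy object" pattern: merely
# touching the class without the backend installed raises a helpful
# ImportError. A minimal self-contained sketch of the idea (names are
# illustrative, not the library's):
#
#   class RequiresTorch(type):
#       def __getattribute__(cls, name):
#           if name.startswith("_"):
#               return super().__getattribute__(name)
#           raise ImportError(f"{cls.__name__} requires PyTorch to be installed.")
#
#   class SomeTorchOnlyModel(metaclass=RequiresTorch):
#       ...
#
#   SomeTorchOnlyModel.from_pretrained  # -> ImportError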
| 21 | """simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
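# Hedged example invocation (script filename and model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated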
| 197 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
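    # With the _LazyModule registration above, heavyweight submodules are only
    # imported on first attribute access, e.g. (illustrative):
    #   from transformers.models.mobilenet_v2 import MobileNetV2Config
    #   # only the configuration submodule is actually imported at this point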
| 246 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
UpperCamelCase_ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCamelCase_ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
UpperCamelCase_ = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'], )
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spearmanr(A, A )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 246 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
for i in range(len(__a) - 1):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCamelCase = labels.index_select(0 , __a) - l_idx
_UpperCamelCase = head_logprob.index_select(0 , __a)
_UpperCamelCase = hidden.index_select(0 , __a)
else:
_UpperCamelCase = hidden
if i == 0:
if labels is not None:
_UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''') and self.keep_order) or keep_order:
out.index_copy_(0 , __a , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCAmelCase ( self , __a) -> List[str]:
'''simple docstring'''
if self.n_clusters == 0:
_UpperCamelCase = self._compute_logit(__a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(__a , dim=-1)
else:
# construct weights and biases
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCamelCase = self.out_layers[i].weight
_UpperCamelCase = self.out_layers[i].bias
if i == 0:
_UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0)
_UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(__a)
biases.append(__a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[0], biases[0], self.out_projs[0]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = hidden.new_empty((head_logit.size(0), self.n_token))
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = [0] + self.cutoffs
for i in range(len(__a) - 1):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(__a , __a , __a , __a)
_UpperCamelCase = nn.functional.log_softmax(__a , dim=1)
_UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCamelCase = logprob_i
return out
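# --- Hedged usage sketch (illustration only) ---
# The class above mirrors Transformer-XL's ProjectedAdaptiveLogSoftmax, but its signatures
# were mangled (every parameter is named `__a`), so it cannot be instantiated as written.
# With the upstream class the call pattern looks like this (the import path varies across
# transformers versions; the sizes below are illustrative assumptions):
if __name__ == "__main__":
    import torch
    from transformers.models.deprecated.transfo_xl.modeling_transfo_xl_utilities import (
        ProjectedAdaptiveLogSoftmax,
    )

    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(4, 7, 32)       # (batch, seq, d_proj)
    labels = torch.randint(0, 1000, (4, 7))
    nll = crit(hidden, labels)           # per-token negative log-likelihood; inputs are shifted internally
    print(nll.shape)                     # (batch * (seq - 1),)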
| 194 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCamelCase : str = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Tuple=False ):
snake_case__, snake_case__ : Tuple = create_model(
"HTSAT-tiny" , "roberta" , lowerCamelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCamelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Union[str, Any] = {}
snake_case__ : List[str] = R".*sequential.(\d+).*"
snake_case__ : Optional[int] = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ : Any = key.replace(lowerCamelCase__ , lowerCamelCase__ )
if re.match(lowerCamelCase__ , lowerCamelCase__ ):
# replace sequential layers with list
snake_case__ : Any = re.match(lowerCamelCase__ , lowerCamelCase__ ).group(1 )
            snake_case__ : Any = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
elif re.match(lowerCamelCase__ , lowerCamelCase__ ):
snake_case__ : Union[str, Any] = int(re.match(lowerCamelCase__ , lowerCamelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
snake_case__ : Optional[int] = 1 if projecton_layer == 0 else 2
snake_case__ : List[str] = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
snake_case__ : List[str] = value
snake_case__ : Optional[int] = mixed_qkv.size(0 ) // 3
snake_case__ : Union[str, Any] = mixed_qkv[:qkv_dim]
snake_case__ : List[str] = mixed_qkv[qkv_dim : qkv_dim * 2]
snake_case__ : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
snake_case__ : Optional[int] = query_layer
snake_case__ : List[str] = key_layer
snake_case__ : int = value_layer
else:
snake_case__ : List[str] = value
return model_state_dict
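# Worked example of the renaming rules above (toy key, not from a real checkpoint):
#   "text_branch.sequential.3.weight"
#   -> KEYS_TO_MODIFY_MAPPING rewrites "text_branch" to "text_model":
#      "text_model.sequential.3.weight"
#   -> the sequential pattern then maps layer 3 to transformers layer 3 // 3 = 1:
#      "text_model.layers.1.linear.weight"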
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : Any=False ):
snake_case__, snake_case__ : List[str] = init_clap(lowerCamelCase__ , enable_fusion=lowerCamelCase__ )
clap_model.eval()
snake_case__ : str = clap_model.state_dict()
snake_case__ : Dict = rename_state_dict(lowerCamelCase__ )
snake_case__ : Tuple = ClapConfig()
snake_case__ : int = enable_fusion
snake_case__ : Optional[int] = ClapModel(lowerCamelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
transformers_config.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
__lowerCamelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
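# Example invocation (hypothetical script name and local paths; the flags are the ones defined above):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion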
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case__ : int = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ : Union[str, Any] = CLIPTextModel(__A )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , __A : int , __A : Any=0 ):
snake_case__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
snake_case__ : List[Any] = torch.manual_seed(__A )
else:
snake_case__ : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
snake_case__ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs(__A )
snake_case__ : List[str] = sd_pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : List[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : List[Any] = "french fries"
snake_case__ : str = sd_pipe(**__A , negative_prompt=__A )
snake_case__ : Any = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Any = self.get_dummy_inputs(__A )
snake_case__ : Tuple = [inputs["prompt"]] * 2
snake_case__ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : List[str] = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
snake_case__ : Union[str, Any] = image / 2 + 0.5
snake_case__ : str = image.permute(0 , 3 , 1 , 2 )
snake_case__ : int = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : str = sd_pipe(**__A ).images
snake_case__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case__ : int = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : Optional[Any] = sd_pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
        snake_case__ : Union[str, Any] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : str = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = self.get_dummy_components()
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = VaeImageProcessor(do_resize=__A , do_normalize=__A )
snake_case__ : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
snake_case__ : int = components["vae"]
snake_case__ : Union[str, Any] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : str = pipe(**__A )[0]
snake_case__ : Dict = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : str , __A : Dict=0 ):
snake_case__ : Optional[int] = torch.manual_seed(__A )
snake_case__ : Tuple = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ : Optional[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Dict ):
snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = 0
def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None:
snake_case__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : Optional[int] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Any = latents[0, -3:, -3:, -1]
snake_case__ : Dict = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case__ : Any = False
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[Any] = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Dict = self.get_inputs()
snake_case__ : List[Any] = pipe(**__A )
snake_case__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self : Tuple ):
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Union[str, Any] = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case__ : Optional[Any] = "timbrooks/instruct-pix2pix"
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = pipe(**__A )
snake_case__ : Tuple = output.images[0]
snake_case__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case__ : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
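# --- Hedged usage sketch (illustration only; downloads the checkpoint used by the tests) ---
# The class name below follows the import at the top of this file; a stock diffusers
# install spells it `StableDiffusionInstructPix2PixPipeline`.
if __name__ == "__main__":
    import torch
    from diffusers import StableDiffusionInstructPixaPixPipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
    )
    pipe.to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    # guidance_scale / image_guidance_scale follow the integration tests above
    edited = pipe("turn him into a cyborg", image=image, guidance_scale=7.5, image_guidance_scale=1.0).images[0]
    edited.save("cyborg.png")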
| 286 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a__ : List[str] = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
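# Hedged usage note: with the _LazyModule above, importing this package is cheap;
# each submodule is loaded only on first attribute access. Illustrative access pattern
# (the package location is an assumption):
#   import transformers.onnx as onnx_module
#   onnx_module.OnnxConfig        # triggers the real `.config` import at this point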
| 54 |
"""simple docstring"""
lowerCamelCase_ = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def snake_case ( A__ ):
UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Tuple = 0
while place < len(A__ ):
if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def snake_case ( A__ ):
UpperCAmelCase_ : Union[str, Any] = []
for arabic, roman in ROMAN:
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ )
result.append(roman * factor )
if number == 0:
break
return "".join(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a_ = logging.get_logger(__name__)
a_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class __SCREAMING_SNAKE_CASE :
def __init__( self : Any , __lowercase : List[str]=None , **__lowercase : Dict ) -> Optional[Any]:
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
SCREAMING_SNAKE_CASE__ : List[str] =model
SCREAMING_SNAKE_CASE__ : str =kwargs.get('''model_save_dir''' , __lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =kwargs.get('''latest_model_name''' , __lowercase )
def __call__( self : int , **__lowercase : str ) -> Any:
SCREAMING_SNAKE_CASE__ : Any ={k: np.array(__lowercase ) for k, v in kwargs.items()}
return self.model.run(__lowercase , __lowercase )
@staticmethod
def __magic_name__ ( __lowercase : Union[str, Path] , __lowercase : Optional[Any]=None , __lowercase : Tuple=None ) -> Any:
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''CPUExecutionProvider'''
return ort.InferenceSession(__lowercase , providers=[provider] , sess_options=__lowercase )
def __magic_name__ ( self : List[str] , __lowercase : Union[str, Path] , __lowercase : Optional[str] = None , **__lowercase : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] =file_name if file_name is not None else ONNX_WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_save_dir.joinpath(self.latest_model_name )
SCREAMING_SNAKE_CASE__ : Any =Path(__lowercase ).joinpath(__lowercase )
try:
shutil.copyfile(__lowercase , __lowercase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_save_dir.joinpath(__lowercase )
if src_path.exists():
SCREAMING_SNAKE_CASE__ : str =Path(__lowercase ).joinpath(__lowercase )
try:
shutil.copyfile(__lowercase , __lowercase )
except shutil.SameFileError:
pass
def __magic_name__ ( self : Tuple , __lowercase : Union[str, os.PathLike] , **__lowercase : List[Any] , ) -> Any:
if os.path.isfile(__lowercase ):
logger.error(F"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(__lowercase , exist_ok=__lowercase )
# saving model weights/files
self._save_pretrained(__lowercase , **__lowercase )
@classmethod
def __magic_name__ ( cls : List[Any] , __lowercase : Union[str, Path] , __lowercase : Optional[Union[bool, str, None]] = None , __lowercase : Optional[Union[str, None]] = None , __lowercase : bool = False , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional["ort.SessionOptions"] = None , **__lowercase : Union[str, Any] , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple =file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowercase ):
SCREAMING_SNAKE_CASE__ : int =OnnxRuntimeModel.load_model(
os.path.join(__lowercase , __lowercase ) , provider=__lowercase , sess_options=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =Path(__lowercase )
# load model from hub
else:
# download model
SCREAMING_SNAKE_CASE__ : int =hf_hub_download(
repo_id=__lowercase , filename=__lowercase , use_auth_token=__lowercase , revision=__lowercase , cache_dir=__lowercase , force_download=__lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(__lowercase ).parent
SCREAMING_SNAKE_CASE__ : List[str] =Path(__lowercase ).name
SCREAMING_SNAKE_CASE__ : int =OnnxRuntimeModel.load_model(__lowercase , provider=__lowercase , sess_options=__lowercase )
return cls(model=__lowercase , **__lowercase )
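    # Hedged usage sketch (illustration only; the repo id below is an assumption):
    #   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
    #   outputs = model(input_ids=np.zeros((1, 8), dtype=np.int64))
    # `__call__` converts every kwarg to a numpy array and feeds it to the ORT session.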
@classmethod
def __magic_name__ ( cls : List[Any] , __lowercase : Union[str, Path] , __lowercase : bool = True , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , **__lowercase : List[str] , ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict =None
if len(str(__lowercase ).split('''@''' ) ) == 2:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =model_id.split('''@''' )
return cls._from_pretrained(
            model_id=__lowercase , revision=__lowercase , cache_dir=__lowercase , force_download=__lowercase , use_auth_token=__lowercase , **__lowercase , )
| 222 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a_ = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =Github(os.environ['''GITHUB_TOKEN'''] )
SCREAMING_SNAKE_CASE__ : List[Any] =g.get_repo('''huggingface/transformers''' )
SCREAMING_SNAKE_CASE__ : List[Any] =repo.get_issues(state='''open''' )
for issue in open_issues:
        SCREAMING_SNAKE_CASE__ : List[Any] =sorted([comment for comment in issue.get_comments()], key=lambda i : i.created_at, reverse=True )
SCREAMING_SNAKE_CASE__ : Dict =comments[0] if len(UpperCamelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
    main()
| 222 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : Dict = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
__SCREAMING_SNAKE_CASE : str = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
__SCREAMING_SNAKE_CASE : Tuple = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
__SCREAMING_SNAKE_CASE : Optional[Any] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
        if is_tf_available():
            from .modeling_tf_data2vec_vision import (
                TFData2VecVisionForImageClassification,
                TFData2VecVisionForSemanticSegmentation,
                TFData2VecVisionModel,
                TFData2VecVisionPreTrainedModel,
            )
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31 |
'''simple docstring'''
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , A : Any , A : str , A : Union[str, Any] ):
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Any = graph
self._normalize_graph(A , A )
_UpperCAmelCase : List[str] = len(A )
_UpperCAmelCase : Tuple = None
def _A ( self : Any , A : List[Any] , A : str ):
        if isinstance(sources , int ):
            _UpperCAmelCase : List[Any] = [sources]
        if isinstance(sinks , int ):
            _UpperCAmelCase : List[Any] = [sinks]
if len(A ) == 0 or len(A ) == 0:
return
_UpperCAmelCase : str = sources[0]
_UpperCAmelCase : Union[str, Any] = sinks[0]
        # make a fake vertex if there is more
        # than one source or sink
if len(A ) > 1 or len(A ) > 1:
_UpperCAmelCase : Dict = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_UpperCAmelCase : str = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_UpperCAmelCase : Optional[Any] = max_input_flow
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : str = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_UpperCAmelCase : Dict = max_input_flow
_UpperCAmelCase : List[Any] = size - 1
def _A ( self : Union[str, Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def _A ( self : Tuple , A : Dict ):
_UpperCAmelCase : str = algorithm(self )
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , A : str ):
_UpperCAmelCase : Optional[int] = flow_network
_UpperCAmelCase : Any = flow_network.verticesCount
_UpperCAmelCase : List[str] = flow_network.sourceIndex
_UpperCAmelCase : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_UpperCAmelCase : Any = flow_network.graph
_UpperCAmelCase : Union[str, Any] = False
def _A ( self : List[str] ):
if not self.executed:
self._algorithm()
_UpperCAmelCase : int = True
def _A ( self : List[Any] ):
pass
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Union[str, Any] ):
super().__init__(A )
# use this to save your result
_UpperCAmelCase : Any = -1
def _A ( self : Union[str, Any] ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Tuple , A : int ):
super().__init__(A )
_UpperCAmelCase : List[str] = [[0] * self.verticies_count for i in range(self.verticies_count )]
_UpperCAmelCase : Union[str, Any] = [0] * self.verticies_count
_UpperCAmelCase : int = [0] * self.verticies_count
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_UpperCAmelCase : Optional[int] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_UpperCAmelCase : Any = 0
while i < len(A ):
_UpperCAmelCase : int = vertices_list[i]
_UpperCAmelCase : int = self.heights[vertex_index]
self.process_vertex(A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(A ) )
_UpperCAmelCase : Union[str, Any] = 0
else:
i += 1
_UpperCAmelCase : List[Any] = sum(self.preflow[self.source_index] )
def _A ( self : Union[str, Any] , A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(A , A )
self.relabel(A )
def _A ( self : int , A : Dict , A : List[str] ):
_UpperCAmelCase : int = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def _A ( self : Optional[int] , A : Union[str, Any] ):
_UpperCAmelCase : str = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_UpperCAmelCase : Tuple = self.heights[to_index]
if min_height is not None:
_UpperCAmelCase : Optional[Any] = min_height + 1
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = [0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__SCREAMING_SNAKE_CASE : List[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__SCREAMING_SNAKE_CASE : Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__SCREAMING_SNAKE_CASE : Optional[Any] = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
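# Worked check for the sample graph above: the only source-to-sink path is
# 0 -> 1 -> 2 -> 3 with bottleneck min(7, 6, 8) = 6, so this prints "maximum flow is 6".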
| 31 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Optional[int] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCAmelCase( lowercase__ ):
"""simple docstring"""
__lowerCamelCase = ["""pixel_values"""]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = True , snake_case__ = None , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = True , **snake_case__ , ):
'''simple docstring'''
super().__init__(**lowercase_ )
lowercase__ : List[str]= size if size is not None else {"shortest_edge": 224}
lowercase__ : Tuple= get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase__ : Union[str, Any]= crop_size if crop_size is not None else {"height": 224, "width": 224}
lowercase__ : int= get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
lowercase__ : Any= do_resize
lowercase__ : List[Any]= size
lowercase__ : Union[str, Any]= resample
lowercase__ : int= do_center_crop
lowercase__ : str= crop_size
lowercase__ : Optional[Any]= do_rescale
lowercase__ : Tuple= rescale_factor
lowercase__ : Union[str, Any]= do_normalize
lowercase__ : Dict= image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : Tuple= image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : List[Any]= do_convert_rgb
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase__ : str= get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
lowercase__ : List[Any]= do_resize if do_resize is not None else self.do_resize
lowercase__ : List[str]= size if size is not None else self.size
lowercase__ : str= get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
lowercase__ : Optional[Any]= resample if resample is not None else self.resample
lowercase__ : Union[str, Any]= do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Tuple= crop_size if crop_size is not None else self.crop_size
lowercase__ : List[Any]= get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
lowercase__ : Union[str, Any]= do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : List[str]= rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Any= do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : List[Any]= image_mean if image_mean is not None else self.image_mean
lowercase__ : Optional[Any]= image_std if image_std is not None else self.image_std
lowercase__ : str= do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Tuple= make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : List[str]= [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Dict= [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : List[str]= [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
lowercase__ : List[str]= [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
lowercase__ : Optional[Any]= [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : List[Any]= [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase__ : str= [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : Tuple= {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
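# --- Hedged usage sketch (illustration only) ---
# The processor above follows transformers' CLIP-style preprocessing; assuming the
# equivalent public class, the call pattern is:
#   from PIL import Image
#   from transformers import CLIPImageProcessor      # assumed public counterpart
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   batch["pixel_values"].shape                      # (1, 3, 224, 224) with the defaults above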
| 351 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Union[str, Any] = False
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= "A painting of a squirrel eating a burger "
lowercase__ : Optional[Any]= torch.manual_seed(0 )
lowercase__ : List[str]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= generator.manual_seed(0 )
lowercase__ : Tuple= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : List[str]= "A painting of a squirrel eating a burger "
lowercase__ : Union[str, Any]= torch.manual_seed(0 )
lowercase__ : Optional[Any]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowercase__ : List[str]= image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[int]= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 150 | 0 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCamelCase__ : str = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ : str = '''PoolFormerConfig'''
# Base docstring
lowerCamelCase__ : Any = '''sail/poolformer_s12'''
lowerCamelCase__ : Optional[Any] = [1, 5_12, 7, 7]
# Image classification docstring
lowerCamelCase__ : str = '''sail/poolformer_s12'''
lowerCamelCase__ : List[Any] = '''tabby, tabby cat'''
lowerCamelCase__ : Union[str, Any] = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase ( _lowerCAmelCase : List[str], _lowerCAmelCase : float = 0.0, _lowerCAmelCase : bool = False ) -> Union[str, Any]:
if drop_prob == 0.0 or not training:
return input
_UpperCAmelCase : List[Any] = 1 - drop_prob
_UpperCAmelCase : Optional[int] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    _UpperCAmelCase : List[Any] = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device )
    random_tensor.floor_() # binarize
    _UpperCAmelCase : str = input.div(keep_prob ) * random_tensor
return output
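# Hedged illustration of the stochastic-depth helper above (called as `drop_path` below):
# with drop_prob=0.5 and training=True, each sample in the batch is either zeroed or
# rescaled by 1 / keep_prob = 2, so the expected activation is unchanged, e.g.
#   x = torch.ones(4, 1, 2, 2)
#   y = drop_path(x, 0.5, True)        # each of the 4 samples is all 0.0 or all 2.0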
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A = None ) -> None:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[str] = drop_prob
def __snake_case ( self , _A ) -> torch.Tensor:
'''simple docstring'''
return drop_path(_A , self.drop_prob , self.training )
def __snake_case ( self ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A , _A , _A , _A , _A , _A=None ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Any = patch_size if isinstance(_A , collections.abc.Iterable ) else (patch_size, patch_size)
_UpperCAmelCase : Any = stride if isinstance(_A , collections.abc.Iterable ) else (stride, stride)
_UpperCAmelCase : List[str] = padding if isinstance(_A , collections.abc.Iterable ) else (padding, padding)
_UpperCAmelCase : str = nn.Convad(_A , _A , kernel_size=_A , stride=_A , padding=_A )
_UpperCAmelCase : Optional[int] = norm_layer(_A ) if norm_layer else nn.Identity()
def __snake_case ( self , _A ) -> Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.projection(_A )
_UpperCAmelCase : int = self.norm(_A )
return embeddings
class _UpperCAmelCase ( nn.GroupNorm):
def __init__( self , _A , **_A ) -> str:
'''simple docstring'''
super().__init__(1 , _A , **_A )
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] = nn.AvgPoolad(_A , stride=1 , padding=pool_size // 2 , count_include_pad=_A )
def __snake_case ( self , _A ) -> int:
'''simple docstring'''
return self.pool(_A ) - hidden_states
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A , _A , _A , _A ) -> Any:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Union[str, Any] = nn.Convad(_A , _A , 1 )
_UpperCAmelCase : Dict = nn.Convad(_A , _A , 1 )
_UpperCAmelCase : List[Any] = PoolFormerDropPath(_A )
if isinstance(config.hidden_act , _A ):
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
else:
_UpperCAmelCase : str = config.hidden_act
def __snake_case ( self , _A ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.conva(_A )
_UpperCAmelCase : Any = self.act_fn(_A )
_UpperCAmelCase : Dict = self.drop(_A )
_UpperCAmelCase : str = self.conva(_A )
_UpperCAmelCase : int = self.drop(_A )
return hidden_states
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A , _A , _A , _A , _A , _A ) -> str:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : int = PoolFormerPooling(_A )
_UpperCAmelCase : List[Any] = PoolFormerOutput(_A , _A , _A , _A )
_UpperCAmelCase : int = PoolFormerGroupNorm(_A )
_UpperCAmelCase : Dict = PoolFormerGroupNorm(_A )
# Useful for training neural nets
_UpperCAmelCase : Optional[int] = PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
_UpperCAmelCase : Dict = config.use_layer_scale
if config.use_layer_scale:
_UpperCAmelCase : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
_UpperCAmelCase : List[str] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
def __snake_case ( self , _A ) -> Union[str, Any]:
'''simple docstring'''
if self.use_layer_scale:
_UpperCAmelCase : List[Any] = self.pooling(self.before_norm(_A ) )
_UpperCAmelCase : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_UpperCAmelCase : Dict = hidden_states + self.drop_path(_A )
_UpperCAmelCase : Optional[Any] = ()
_UpperCAmelCase : List[Any] = self.output(self.after_norm(_A ) )
_UpperCAmelCase : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_UpperCAmelCase : str = hidden_states + self.drop_path(_A )
_UpperCAmelCase : Optional[Any] = (output,) + outputs
return outputs
else:
_UpperCAmelCase : str = self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
_UpperCAmelCase : Optional[Any] = pooling_output + hidden_states
_UpperCAmelCase : Optional[Any] = ()
# Second residual connection inside the PoolFormerOutput block
_UpperCAmelCase : List[Any] = self.drop_path(self.output(self.after_norm(_A ) ) )
_UpperCAmelCase : Dict = hidden_states + layer_output
_UpperCAmelCase : Optional[int] = (output,) + outputs
return outputs
class PoolFormerEncoder( nn.Module):
def __init__( self , _A ) -> Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Optional[Any] = config
# stochastic depth decay rule
_UpperCAmelCase : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_UpperCAmelCase : Any = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_UpperCAmelCase : List[str] = nn.ModuleList(_A )
# Transformer blocks
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[str] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_UpperCAmelCase : int = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_A ) )
_UpperCAmelCase : Union[str, Any] = nn.ModuleList(_A )
def __snake_case ( self , _A , _A=False , _A=True ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = () if output_hidden_states else None
_UpperCAmelCase : Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = layers
# Get patch embeddings from hidden_states
_UpperCAmelCase : Tuple = embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
_UpperCAmelCase : Optional[Any] = blk(_A )
_UpperCAmelCase : Dict = layer_outputs[0]
if output_hidden_states:
_UpperCAmelCase : str = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class PoolFormerPreTrainedModel( PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = """poolformer"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = True
def __snake_case ( self , _A ) -> Tuple:
'''simple docstring'''
        if isinstance(_A , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __snake_case ( self , _A , _A=False ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_A , _A ):
_UpperCAmelCase : Tuple = value
lowerCamelCase__ : Any = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCamelCase__ : int = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , __a , )
class PoolFormerModel( PoolFormerPreTrainedModel):
def __init__( self , _A ) -> List[Any]:
'''simple docstring'''
super().__init__(_A )
_UpperCAmelCase : Optional[Any] = config
_UpperCAmelCase : List[Any] = PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __snake_case ( self , _A = None , _A = None , _A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
_UpperCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_UpperCAmelCase : Optional[Any] = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , )
_UpperCAmelCase : Dict = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=encoder_outputs.hidden_states , )
class _UpperCAmelCase ( nn.Module):
def __init__( self , _A ) -> Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def __snake_case ( self , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.dense(_A )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , __a , )
class PoolFormerForImageClassification( PoolFormerPreTrainedModel):
def __init__( self , _A ) -> Optional[int]:
'''simple docstring'''
super().__init__(_A )
_UpperCAmelCase : int = config.num_labels
_UpperCAmelCase : List[str] = PoolFormerModel(_A )
# Final norm
_UpperCAmelCase : Dict = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_UpperCAmelCase : Union[str, Any] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __snake_case ( self , _A = None , _A = None , _A = None , _A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
_UpperCAmelCase : int = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.poolformer(
_A , output_hidden_states=_A , return_dict=_A , )
_UpperCAmelCase : Union[str, Any] = outputs[0]
_UpperCAmelCase : Optional[int] = self.classifier(self.norm(_A ).mean([-2, -1] ) )
_UpperCAmelCase : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase : Dict = """single_label_classification"""
else:
_UpperCAmelCase : Optional[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
_UpperCAmelCase : Dict = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase : List[Any] = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase : Union[str, Any] = CrossEntropyLoss()
_UpperCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase : Optional[Any] = BCEWithLogitsLoss()
_UpperCAmelCase : int = loss_fct(_A , _A )
if not return_dict:
_UpperCAmelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 246 |
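A minimal usage sketch for the classification model above, assuming the published `transformers` names (`PoolFormerConfig`, `PoolFormerForImageClassification`) and a randomly initialised model rather than a checkpoint:

import torch
from transformers import PoolFormerConfig, PoolFormerForImageClassification

config = PoolFormerConfig(num_labels=10)          # random weights, no checkpoint
model = PoolFormerForImageClassification(config)
pixel_values = torch.randn(2, 3, 224, 224)        # (batch, channels, height, width)
labels = torch.tensor([1, 7])

outputs = model(pixel_values=pixel_values, labels=labels)
print(outputs.loss.item(), outputs.logits.shape)  # scalar cross-entropy loss, torch.Size([2, 10])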
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( _lowerCAmelCase : int = 20 ) -> str:
    total = math.comb(NUM_BALLS, _lowerCAmelCase )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, _lowerCAmelCase )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 246 | 1 |
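A Monte-Carlo sanity check of the closed form above (7 colours of 10 balls, 20 drawn); an illustrative sketch, not part of the original solution:

import random

def sampled_distinct_colours(trials: int = 100_000) -> float:
    urn = [colour for colour in range(7) for _ in range(10)]
    # count distinct colours in a random 20-ball draw, averaged over trials
    return sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials

print(sampled_distinct_colours())  # ~6.8187, close to solution(20) = 6.818741802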
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig( PretrainedConfig):
    model_type = 'encodec'
    def __init__( self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=2_4000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs, ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
        super().__init__(**kwargs )
@property
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 31 |
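The derived properties above reduce to a few lines of arithmetic. A stand-alone recomputation with illustrative 48 kHz-style values (the concrete numbers are assumptions, not read from any checkpoint):

import math
import numpy as np

sampling_rate, chunk_length_s, overlap = 48_000, 1.0, 0.01
upsampling_ratios = [8, 5, 4, 2]

chunk_length = int(chunk_length_s * sampling_rate)          # 48000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 47520 samples
hop_length = np.prod(upsampling_ratios)                     # 320
frame_rate = math.ceil(sampling_rate / hop_length)          # 150 frames/s
print(chunk_length, chunk_stride, frame_rate)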
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester( unittest.TestCase):
    def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=1_6000, return_attention_mask=True, do_normalize=True, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self, equal_length=False, numpify=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
        UpperCamelCase__ = Speech2TextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float32 )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31 | 1 |
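What `_check_zero_mean_unit_variance` asserts, shown in isolation: utterance-level normalization makes every feature dimension zero-mean and unit-variance along the time axis. A self-contained sketch on a fake fbank matrix:

import numpy as np

features = np.random.randn(120, 24) * 3.5 + 1.2   # fake fbank matrix (time, mel bins)
normalized = (features - features.mean(axis=0)) / (features.std(axis=0) + 1e-10)

assert np.all(np.abs(normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normalized.var(axis=0) - 1) < 1e-3)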
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = "longformer"
def __init__(self : Optional[Any] , snake_case__ : Union[List[int], int] = 5_12 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 0 , snake_case__ : int = 2 , snake_case__ : int = 3_05_22 , snake_case__ : int = 7_68 , snake_case__ : int = 12 , snake_case__ : int = 12 , snake_case__ : int = 30_72 , snake_case__ : str = "gelu" , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 5_12 , snake_case__ : int = 2 , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : bool = False , **snake_case__ : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
snake_case : List[Any] = attention_window
snake_case : Any = sep_token_id
snake_case : str = bos_token_id
snake_case : List[str] = eos_token_id
snake_case : Optional[Any] = vocab_size
snake_case : List[str] = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : str = hidden_act
snake_case : List[str] = intermediate_size
snake_case : Any = hidden_dropout_prob
snake_case : Union[str, Any] = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : int = type_vocab_size
snake_case : Dict = initializer_range
snake_case : Union[str, Any] = layer_norm_eps
snake_case : List[str] = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
def __init__(self : Dict , snake_case__ : "PretrainedConfig" , snake_case__ : str = "default" , snake_case__ : "List[PatchingSpec]" = None ) -> Dict:
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ , snake_case__ )
snake_case : int = True
@property
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case : Any = super().outputs
if self.task == "default":
snake_case : List[Any] = {0: "batch"}
return outputs
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> float:
'''simple docstring'''
return 1e-4
@property
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : "PreTrainedTokenizerBase" , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
| 59 |
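The dummy-input trick at the end of the ONNX config (mark every second token as global), shown on a tiny tensor outside the export machinery:

import torch

input_ids = torch.randint(0, 30_522, (2, 8))  # (batch, sequence)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1             # every second token gets global attention
print(global_attention_mask[0])               # tensor([1, 0, 1, 0, 1, 0, 1, 0])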
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
        return embeds
| 286 | 0 |
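Scale and unscale above are exact inverses. A round-trip sketch with plain tensors standing in for the registered mean/std parameters:

import torch

mean, std = torch.zeros(1, 768), torch.full((1, 768), 2.0)
embeds = torch.randn(4, 768)

scaled = (embeds - mean) * 1.0 / std   # same formula as scale()
restored = scaled * std + mean         # same formula as unscale()
assert torch.allclose(restored, embeds, atol=1e-6)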
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot( lst ) -> Any:
    return choice(lst )
def kth_number( lst , k ) -> int:
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
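Usage of `kth_number` as restored above. `k` is 1-indexed, and the elements should be distinct since the partition drops values equal to the pivot:

print(kth_number([2, 1, 3, 4, 5], 3))     # 3  (third smallest)
print(kth_number([3, 2, 5, 6, 7, 8], 2))  # 3  (second smallest)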
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , snake_case_ : str , snake_case_ : Dict=7 , snake_case_ : str=3 , snake_case_ : List[str]=18 , snake_case_ : Tuple=30 , snake_case_ : int=400 , snake_case_ : Any=True , snake_case_ : List[str]=None , snake_case_ : List[str]=True , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=True , ):
snake_case__ : List[str] = size if size is not None else {"""shortest_edge""": 20}
snake_case__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
snake_case__ : Tuple = parent
snake_case__ : Tuple = batch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = image_size
snake_case__ : str = min_resolution
snake_case__ : Dict = max_resolution
snake_case__ : Optional[int] = do_resize
snake_case__ : int = size
snake_case__ : List[Any] = do_center_crop
snake_case__ : int = crop_size
snake_case__ : Dict = do_flip_channel_order
def lowerCamelCase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[str] = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """do_center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """do_flip_channel_order""" ) )
def lowerCamelCase ( self : List[str] ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase ( self : str ):
pass
def lowerCamelCase ( self : int ):
# Initialize image_processing
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
snake_case__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase ( self : int ):
# Initialize image_processing
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : str = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase ( self : List[Any] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 43 | 1 |
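What the `do_flip_channel_order` flag tested above does under the hood: reverse the channel axis so RGB input matches the BGR ordering the original MobileViT implementation used. Sketched on a raw channels-first array:

import numpy as np

rgb = np.zeros((3, 2, 2), dtype=np.float32)
rgb[0] = 1.0                       # pure red, channels-first RGB
bgr = np.flip(rgb, axis=0)         # channel order becomes B, G, R
print(bgr[0].max(), bgr[2].max())  # 0.0 1.0 -> red moved to the last channel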
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowercase ( DiffusionPipeline ):
def __init__( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = None , A_ = 50 , A_ = "pil" , A_ = True , **A_ , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
UpperCamelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A_ , )
UpperCamelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase = self.unet(A_ , A_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase = self.scheduler.step(A_ , A_ , A_ ).prev_sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(A_ )
if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A_ )
| 222 |
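The denoising loop above, isolated with stub components: a small randomly initialised `UNet2DModel` and a `DDIMScheduler` (both from the published `diffusers` API) stand in for whatever the pipeline was registered with; the sizes are arbitrary assumptions.

import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3,
                   block_out_channels=(32, 64),
                   down_block_types=("DownBlock2D", "AttnDownBlock2D"),
                   up_block_types=("AttnUpBlock2D", "UpBlock2D"))
scheduler = DDIMScheduler()
scheduler.set_timesteps(10)

image = torch.randn(1, 3, 32, 32)  # start from pure Gaussian noise
with torch.no_grad():
    for t in scheduler.timesteps:
        noise_pred = unet(image, t).sample                        # predict noise
        image = scheduler.step(noise_pred, t, image).prev_sample  # x_t -> x_t-1
image = (image / 2 + 0.5).clamp(0, 1)
print(image.shape)  # torch.Size([1, 3, 32, 32])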
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ) -> int:
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ) -> "tuple[int, int]":
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 222 | 1 |
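A quick check of the restored functions on a small fixed array; `_in_place_partition` counts one comparison per scanned element, so the total varies with the random pivots (between 6 and 10 for five elements):

arr = np.array([5.0, 3.0, 1.0, 4.0, 2.0])
comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
print(arr)          # [1. 2. 3. 4. 5.]
print(comparisons)  # 6-10, depending on the pivots chosen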
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ = True, __magic_name__ = None, __magic_name__ = 32, __magic_name__ = True, __magic_name__ = 1 / 255, __magic_name__ = True, __magic_name__ = True, __magic_name__ = [0.4814_5466, 0.457_8275, 0.4082_1073], __magic_name__ = [0.2686_2954, 0.2613_0258, 0.2757_7711], __magic_name__ = True, __magic_name__=7, __magic_name__=30, __magic_name__=400, __magic_name__=3, ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Optional[Any] = do_resize
UpperCamelCase__ : Dict = size if size is not None else {'''shortest_edge''': 288}
UpperCamelCase__ : Any = size_divisor
UpperCamelCase__ : Dict = do_rescale
UpperCamelCase__ : List[str] = rescale_factor
UpperCamelCase__ : Tuple = do_normalize
UpperCamelCase__ : Tuple = do_center_crop
UpperCamelCase__ : Tuple = image_mean
UpperCamelCase__ : Optional[int] = image_std
UpperCamelCase__ : Tuple = do_pad
UpperCamelCase__ : Dict = batch_size
UpperCamelCase__ : Union[str, Any] = num_channels
UpperCamelCase__ : Dict = min_resolution
UpperCamelCase__ : Optional[int] = max_resolution
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=False ) -> str:
"""simple docstring"""
if not batched:
UpperCamelCase__ : Dict = self.size['''shortest_edge''']
UpperCamelCase__ : Tuple = image_inputs[0]
if isinstance(__magic_name__, Image.Image ):
UpperCamelCase__ : Tuple = image.size
else:
UpperCamelCase__ : str = image.shape[1], image.shape[2]
UpperCamelCase__ : Any = size / min(__magic_name__, __magic_name__ )
if h < w:
UpperCamelCase__ : List[str] = size, scale * w
else:
UpperCamelCase__ : Union[str, Any] = scale * h, size
UpperCamelCase__ : int = int((1333 / 800) * size )
if max(__magic_name__, __magic_name__ ) > max_size:
UpperCamelCase__ : List[Any] = max_size / max(__magic_name__, __magic_name__ )
UpperCamelCase__ : Optional[Any] = newh * scale
UpperCamelCase__ : int = neww * scale
UpperCamelCase__ : Optional[Any] = int(newh + 0.5 ), int(neww + 0.5 )
UpperCamelCase__ : List[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCamelCase__ : Tuple = []
for image in image_inputs:
UpperCamelCase__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ : Union[str, Any] = max(__magic_name__, key=lambda __magic_name__ : item[0] )[0]
UpperCamelCase__ : Union[str, Any] = max(__magic_name__, key=lambda __magic_name__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = BridgeTowerImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__, '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__, '''image_std''' ) )
self.assertTrue(hasattr(__magic_name__, '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__, '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__, '''size''' ) )
self.assertTrue(hasattr(__magic_name__, '''size_divisor''' ) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, Image.Image )
# Test not batched input
UpperCamelCase__ : Optional[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ : int = image_processing(__magic_name__, return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : Any = self.image_processor_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=__magic_name__, numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, np.ndarray )
# Test not batched input
UpperCamelCase__ : List[str] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : Tuple = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ : Union[str, Any] = image_processing(__magic_name__, return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : List[Any] = self.image_processor_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__magic_name__, torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, torch.Tensor )
# Test not batched input
UpperCamelCase__ : Any = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : Dict = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ : int = image_processing(__magic_name__, return_tensors='''pt''' ).pixel_values
UpperCamelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
| 367 |
def hexagonal_numbers(length: int ) -> list[int]:
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 247 | 0 |
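Why `n * (2 * n - 1)` is the n-th hexagonal number: it equals the partial sum 1 + 5 + 9 + ... of the step-4 arithmetic progression, which a short loop confirms:

for n in range(1, 8):
    assert n * (2 * n - 1) == sum(range(1, 4 * n, 4))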
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase : int = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class VivitImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self ,snake_case = True ,snake_case = None ,snake_case = PILImageResampling.BILINEAR ,snake_case = True ,snake_case = None ,snake_case = True ,snake_case = 1 / 255 ,snake_case = True ,snake_case = True ,snake_case = None ,snake_case = None ,**snake_case ,):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : int = size if size is not None else {"""shortest_edge""": 256}
lowercase : Union[str, Any] = get_size_dict(snake_case ,default_to_square=snake_case )
lowercase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase : Tuple = get_size_dict(snake_case ,param_name="""crop_size""" )
lowercase : Union[str, Any] = do_resize
lowercase : str = size
lowercase : Optional[int] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : List[str] = resample
lowercase : Any = do_rescale
lowercase : Union[str, Any] = rescale_factor
lowercase : Any = offset
lowercase : Optional[int] = do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case = PILImageResampling.BILINEAR ,snake_case = None ,**snake_case ,):
'''simple docstring'''
lowercase : List[Any] = get_size_dict(snake_case ,default_to_square=snake_case )
if "shortest_edge" in size:
lowercase : str = get_resize_output_image_size(snake_case ,size["""shortest_edge"""] ,default_to_square=snake_case )
elif "height" in size and "width" in size:
lowercase : Optional[int] = (size["""height"""], size["""width"""])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(snake_case ,size=snake_case ,resample=snake_case ,data_format=snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case = None ,**snake_case ,):
'''simple docstring'''
lowercase : List[Any] = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(snake_case ,size=(size["""height"""], size["""width"""]) ,data_format=snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case = True ,snake_case = None ,**snake_case ,):
'''simple docstring'''
        lowercase : Union[str, Any] = image.astype(np.float32 )
if offset:
lowercase : Optional[Any] = image - (scale / 2)
return rescale(snake_case ,scale=snake_case ,data_format=snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case = None ,**snake_case ,):
'''simple docstring'''
return normalize(snake_case ,mean=snake_case ,std=snake_case ,data_format=snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = ChannelDimension.FIRST ,):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowercase : Optional[int] = to_numpy_array(snake_case )
if do_resize:
lowercase : Union[str, Any] = self.resize(image=snake_case ,size=snake_case ,resample=snake_case )
if do_center_crop:
lowercase : Optional[Any] = self.center_crop(snake_case ,size=snake_case )
if do_rescale:
lowercase : Optional[int] = self.rescale(image=snake_case ,scale=snake_case ,offset=snake_case )
if do_normalize:
lowercase : Any = self.normalize(image=snake_case ,mean=snake_case ,std=snake_case )
lowercase : Any = to_channel_dimension_format(snake_case ,snake_case )
return image
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = ChannelDimension.FIRST ,**snake_case ,):
'''simple docstring'''
lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : List[str] = resample if resample is not None else self.resample
lowercase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Any = do_rescale if do_rescale is not None else self.do_rescale
lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = offset if offset is not None else self.offset
lowercase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : int = size if size is not None else self.size
lowercase : List[Any] = get_size_dict(snake_case ,default_to_square=snake_case )
lowercase : str = crop_size if crop_size is not None else self.crop_size
lowercase : List[Any] = get_size_dict(snake_case ,param_name="""crop_size""" )
if not valid_images(snake_case ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowercase : List[Any] = make_batched(snake_case )
lowercase : Any = [
[
self._preprocess_image(
image=snake_case ,do_resize=snake_case ,size=snake_case ,resample=snake_case ,do_center_crop=snake_case ,crop_size=snake_case ,do_rescale=snake_case ,rescale_factor=snake_case ,offset=snake_case ,do_normalize=snake_case ,image_mean=snake_case ,image_std=snake_case ,data_format=snake_case ,)
for img in video
]
for video in videos
]
lowercase : Optional[int] = {"""pixel_values""": videos}
return BatchFeature(data=snake_case ,tensor_type=snake_case )
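# How make_batched (restored above) regularizes input shapes, on dummy frames:
# a single image, one video (list of frames), and a batch of videos all
# normalise to List[List[frame]]. Illustrative only; the frame is a stand-in.
dummy_frame = np.zeros((16, 16, 3), dtype=np.uint8)
for candidate in (dummy_frame, [dummy_frame] * 2, [[dummy_frame], [dummy_frame] * 2]):
    batched = make_batched(candidate)
    print(len(batched), [len(video) for video in batched])  # 1 [1] / 1 [2] / 2 [1, 2]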
| 20 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE__ = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case = test_results.split(' ' )
snake_case = 0
snake_case = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
snake_case = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_UpperCamelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case = {}
snake_case = None
snake_case = False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]' , _UpperCamelCase ):
snake_case = True
snake_case = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
snake_case = line
snake_case = False
return failures
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = title
snake_case = doc_test_results['time_spent'].split(',' )[0]
snake_case = doc_test_results['success']
snake_case = doc_test_results['failures']
snake_case = self.n_success + self.n_failures
# Failures and success of the modeling tests
snake_case = doc_test_results
@property
def snake_case ( self ):
"""simple docstring"""
snake_case = [self._time_spent]
snake_case = 0
for time in time_spent:
snake_case = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase ) == 1:
snake_case = [0, 0, time_parts[0]]
snake_case ,snake_case ,snake_case = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
snake_case ,snake_case ,snake_case = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F"""{int(lowerCAmelCase )}h{int(lowerCAmelCase )}m{int(lowerCAmelCase )}s"""
@property
def snake_case ( self ):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def snake_case ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def snake_case ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def snake_case ( self ):
"""simple docstring"""
snake_case = 40
snake_case = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase , lowerCAmelCase )}
snake_case = ''
for category, failures in category_failures.items():
if len(lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def snake_case ( self ):
"""simple docstring"""
snake_case = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase )
@staticmethod
def snake_case ( ):
"""simple docstring"""
snake_case = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=lowerCAmelCase , )
def snake_case ( self ):
"""simple docstring"""
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
snake_case = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
snake_case = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=lowerCAmelCase , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = ''
for key, value in failures.items():
            snake_case = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
snake_case = job_name
snake_case = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
snake_case = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def snake_case ( self ):
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
snake_case = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
        snake_case = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
                snake_case = F"""*Num failures*: {len(job_result["failed"] )}\n"""
snake_case = job_result['failures']
snake_case = self.get_reply_blocks(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , text=lowerCAmelCase )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F"""Results for {job}""" , blocks=lowerCAmelCase , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def lowerCAmelCase__ ( ) -> Tuple:
"""simple docstring"""
snake_case = os.environ['GITHUB_RUN_ID']
snake_case = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
snake_case = requests.get(_UpperCamelCase ).json()
snake_case = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
snake_case = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
for i in range(_UpperCamelCase ):
snake_case = requests.get(url + f"""&page={i + 2}""" ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , _UpperCamelCase )
return {}
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
snake_case = {}
if os.path.exists(_UpperCamelCase ):
snake_case = os.listdir(_UpperCamelCase )
for file in files:
try:
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , encoding='utf-8' ) as f:
snake_case = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"""Could not open {os.path.join(_UpperCamelCase , _UpperCamelCase )}.""" ) from e
return _artifact
def lowerCAmelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = name
snake_case = []
def __str__( self ):
"""simple docstring"""
return self.name
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
self.paths.append({'name': self.name, 'path': path} )
snake_case = {}
snake_case = filter(os.path.isdir , os.listdir() )
for directory in directories:
snake_case = directory
if artifact_name not in _available_artifacts:
snake_case = Artifact(_UpperCamelCase )
_available_artifacts[artifact_name].add_path(_UpperCamelCase )
return _available_artifacts
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_job_links()
SCREAMING_SNAKE_CASE__ = retrieve_available_artifacts()
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
SCREAMING_SNAKE_CASE__ = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
SCREAMING_SNAKE_CASE__ = github_actions_job_links.get("run_doctests")
SCREAMING_SNAKE_CASE__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
SCREAMING_SNAKE_CASE__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = handle_test_results(artifact["stats"])
SCREAMING_SNAKE_CASE__ = failed
SCREAMING_SNAKE_CASE__ = success
SCREAMING_SNAKE_CASE__ = time_spent[1:-1] + ", "
SCREAMING_SNAKE_CASE__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
SCREAMING_SNAKE_CASE__ = line.replace("FAILED ", "")
SCREAMING_SNAKE_CASE__ = line.split()[0].replace("\n", "")
if "::" in line:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line.split("::")
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
SCREAMING_SNAKE_CASE__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
SCREAMING_SNAKE_CASE__ = all_failures[test] if test in all_failures else "N/A"
SCREAMING_SNAKE_CASE__ = failure
break
SCREAMING_SNAKE_CASE__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 150 | 0 |
from __future__ import annotations
import time
A : Any = list[tuple[int, int]]
A : Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A :
'''simple docstring'''
def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None ) -> int:
"""simple docstring"""
A__ = pos_x
A__ = pos_y
A__ = (pos_y, pos_x)
A__ = goal_x
A__ = goal_y
A__ = parent
class A :
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[int, int] ) -> List[str]:
"""simple docstring"""
A__ = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCAmelCase )
A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCAmelCase )
A__ = [self.start]
A__ = False
def a_ ( self : List[str] ) -> Path | None:
"""simple docstring"""
while self.node_queue:
A__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
A__ = True
return self.retrace_path(__lowerCAmelCase )
A__ = self.get_successors(__lowerCAmelCase )
for node in successors:
self.node_queue.append(__lowerCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def a_ ( self : Any , __lowerCAmelCase : Node ) -> list[Node]:
"""simple docstring"""
A__ = []
for action in delta:
A__ = parent.pos_x + action[1]
A__ = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , __lowerCAmelCase ) )
return successors
def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> Path:
"""simple docstring"""
A__ = node
A__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ = current_node.parent
path.reverse()
return path
class A :
'''simple docstring'''
def __init__( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
A__ = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase )
A__ = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase )
A__ = False
def a_ ( self : Optional[Any] ) -> Path | None:
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
A__ = self.fwd_bfs.node_queue.pop(0 )
A__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
A__ = True
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase )
A__ = current_bwd_node
A__ = current_fwd_node
A__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(__lowerCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__lowerCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def a_ ( self : int , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> Path:
"""simple docstring"""
A__ = self.fwd_bfs.retrace_path(__lowerCAmelCase )
A__ = self.bwd_bfs.retrace_path(__lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
A__ = fwd_path + bwd_path
return path
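# Note on retrace_bidirectional_path: the meeting node ends both partial paths,
# so one copy is dropped (bwd_path.pop()) before the backward half is reversed
# and appended to the forward half.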
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : str = (0, 0)
A : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Dict = time.time()
A : Any = BreadthFirstSearch(init, goal)
A : Union[str, Any] = bfs.search()
A : Any = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
A : Optional[int] = time.time()
A : str = BidirectionalBreadthFirstSearch(init, goal)
A : List[Any] = bd_bfs.search()
A : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 276 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __lowerCamelCase ( __a :int ) -> int:
"""simple docstring"""
A__ = prime_factors(__a )
    if is_square_free(A__ ):
        return -1 if len(A__ ) % 2 else 1
return 0
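# Illustrative values (assuming prime_factors/is_square_free behave as their
# names suggest):
#   4  -> 0   (2 * 2 is not square-free)
#   6  -> 1   (square-free, even count of prime factors: 2, 3)
#   30 -> -1  (square-free, odd count of prime factors: 2, 3, 5)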
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
'''simple docstring'''
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(git_repo_path, """src""", """transformers""")
__SCREAMING_SNAKE_CASE : Optional[int] = """
{0} = None
"""
__SCREAMING_SNAKE_CASE : Dict = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Any ):
_UpperCAmelCase : Optional[Any] = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
self.assertIsNone(A )
_UpperCAmelCase : int = find_backend(" if not is_tokenizers_available():" )
self.assertEqual(A , "tokenizers" )
_UpperCAmelCase : Optional[int] = find_backend(" if not is_tensorflow_text_available():" )
self.assertEqual(A , "tensorflow_text" )
_UpperCAmelCase : List[str] = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
self.assertEqual(A , "sentencepiece_and_tokenizers" )
_UpperCAmelCase : Any = find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
self.assertEqual(A , "sentencepiece_and_tensorflow_text" )
_UpperCAmelCase : Union[str, Any] = find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
self.assertEqual(A , "sentencepiece_and_tokenizers_and_vision" )
def _A ( self : Any ):
_UpperCAmelCase : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , A )
self.assertIn("tensorflow_text" , A )
self.assertIn("sentencepiece_and_tokenizers" , A )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertModel" , objects["tf"] )
self.assertIn("FlaxBertModel" , objects["flax"] )
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )
def _A ( self : Dict ):
_UpperCAmelCase : List[str] = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(A , "\nCONSTANT = None\n" )
_UpperCAmelCase : Union[str, Any] = create_dummy_object("function" , "'torch'" )
self.assertEqual(
A , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
_UpperCAmelCase : List[str] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
_UpperCAmelCase : Any = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(A , A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Dict = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
_UpperCAmelCase : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , A )
| 31 | '''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
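# Worked example (PV = nRT): 1 mol at 300 K in 1 m^3 exerts a pressure of
# 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa; the first helper above solves the
# relation for pressure, the second for volume.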
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31 | 1 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_lowerCAmelCase : Tuple = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase : int = """config.json"""
_lowerCAmelCase : Dict = """diffusion_pytorch_model.bin"""
_lowerCAmelCase : Dict = """diffusion_flax_model.msgpack"""
_lowerCAmelCase : int = """model.onnx"""
_lowerCAmelCase : List[Any] = """diffusion_pytorch_model.safetensors"""
_lowerCAmelCase : Any = """weights.pb"""
_lowerCAmelCase : int = """https://huggingface.co"""
_lowerCAmelCase : Union[str, Any] = default_cache_path
_lowerCAmelCase : str = """diffusers_modules"""
_lowerCAmelCase : List[str] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
_lowerCAmelCase : Any = ["""fp16""", """non-ema"""]
_lowerCAmelCase : List[str] = """.self_attn"""
| 298 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , )
def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self : Any , snake_case__ : str , snake_case__ : str ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCAmelCase__ ( __magic_name__ ):
@require_beam
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : Dict ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                    snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 298 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Dict = MvpTokenizer
a__ : str = MvpTokenizerFast
a__ : Any = True
a__ : Optional[int] = filter_roberta_detectors
def UpperCamelCase__ ( self) -> List[str]:
super().setUp()
__UpperCamelCase :str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase :int = dict(zip(__lowercase , range(len(__lowercase))))
__UpperCamelCase :Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase :List[str] = {'''unk_token''': '''<unk>'''}
__UpperCamelCase :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__lowercase) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__lowercase))
def UpperCamelCase__ ( self , **__lowercase) -> Optional[int]:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> Union[str, Any]:
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> str:
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase__ ( self) -> Optional[Any]:
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''')
@cached_property
def UpperCamelCase__ ( self) -> Any:
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''')
@require_torch
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__UpperCamelCase :Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase :Tuple = tokenizer(__lowercase , max_length=len(__lowercase) , padding=__lowercase , return_tensors='''pt''')
self.assertIsInstance(__lowercase , __lowercase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
__UpperCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__lowercase , __lowercase)
# Test that special tokens are reset
@require_torch
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase :int = tokenizer(__lowercase , padding=__lowercase , return_tensors='''pt''')
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , __lowercase)
self.assertIn('''attention_mask''' , __lowercase)
self.assertNotIn('''labels''' , __lowercase)
self.assertNotIn('''decoder_attention_mask''' , __lowercase)
@require_torch
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase :Union[str, Any] = tokenizer(text_target=__lowercase , max_length=32 , padding='''max_length''' , return_tensors='''pt''')
self.assertEqual(32 , targets['''input_ids'''].shape[1])
@require_torch
def UpperCamelCase__ ( self) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase :Union[str, Any] = tokenizer(
['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=__lowercase , truncation=__lowercase , return_tensors='''pt''')
self.assertIsInstance(__lowercase , __lowercase)
self.assertEqual(batch.input_ids.shape , (2, 1_024))
@require_torch
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[Any] = ['''A long paragraph for summarization.''']
__UpperCamelCase :List[Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase :List[Any] = tokenizer(__lowercase , text_target=__lowercase , return_tensors='''pt''')
__UpperCamelCase :Tuple = inputs['''input_ids''']
__UpperCamelCase :Optional[int] = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def UpperCamelCase__ ( self) -> Optional[int]:
pass
def UpperCamelCase__ ( self) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__UpperCamelCase :List[Any] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase)
__UpperCamelCase :int = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase)
__UpperCamelCase :str = '''A, <mask> AllenNLP sentence.'''
__UpperCamelCase :List[Any] = tokenizer_r.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase)
__UpperCamelCase :Union[str, Any] = tokenizer_p.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
__UpperCamelCase :Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
__UpperCamelCase :Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(
__lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
__lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 43 | import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = R'''\w+[.]\d+'''
__UpperCamelCase :List[str] = re.findall(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for pat in pats:
__UpperCamelCase :int = key.replace(SCREAMING_SNAKE_CASE , '''_'''.join(pat.split('''.''' ) ) )
return key
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__UpperCamelCase :str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__UpperCamelCase :Any = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__UpperCamelCase :str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__UpperCamelCase :List[str] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__UpperCamelCase :List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__UpperCamelCase :List[str] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
__UpperCamelCase :Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__UpperCamelCase :int = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__UpperCamelCase :int = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
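# Layout note for the branches above: PyTorch stores conv kernels as
# (out_channels, in_channels, h, w) while Flax expects (h, w, in, out), hence
# transpose(2, 3, 1, 0); linear weights are plain-transposed because Flax
# applies x @ kernel rather than weight @ x.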
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__UpperCamelCase :str = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
__UpperCamelCase :int = flatten_dict(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase :List[Any] = rename_key(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase :Any = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__UpperCamelCase :str = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
| 43 | 1 |
"""simple docstring"""
def _lowerCamelCase( a , a ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
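    # Expected output (float repr may vary slightly):
    #   price_plus_tax(100, 0.25) = 125.0
    #   price_plus_tax(125.50, 0.05) = 131.775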
| 268 | """simple docstring"""
from ...processing_utils import ProcessorMixin
class snake_case__ ( snake_case_ ):
_snake_case : List[str] = """WhisperFeatureExtractor"""
_snake_case : Any = """WhisperTokenizer"""
def __init__( self , lowerCamelCase , lowerCamelCase ):
super().__init__(lowerCamelCase , lowerCamelCase )
__a = self.feature_extractor
__a = False
def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase , language=lowerCamelCase , no_timestamps=lowerCamelCase )
def __call__( self , *lowerCamelCase , **lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase , **lowerCamelCase )
__a = kwargs.pop("audio" , lowerCamelCase )
__a = kwargs.pop("sampling_rate" , lowerCamelCase )
__a = kwargs.pop("text" , lowerCamelCase )
if len(lowerCamelCase ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__a = self.feature_extractor(lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , **lowerCamelCase )
if text is not None:
__a = self.tokenizer(lowerCamelCase , **lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a = encodings["input_ids"]
return inputs
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , *lowerCamelCase , **lowerCamelCase ):
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase="np" ):
return self.tokenizer.get_prompt_ids(lowerCamelCase , return_tensors=lowerCamelCase )
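# Minimal usage sketch for the processor above (the checkpoint name is
# illustrative):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text=transcript).input_ids
# which exercises exactly the audio/text dispatch implemented in __call__.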
| 268 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_lowerCamelCase : Optional[Any] = "http://www.mocksite.com/file1.txt"
_lowerCamelCase : Any = "\"text\": [\"foo\", \"foo\"]"
_lowerCamelCase : str = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __UpperCAmelCase :
UpperCamelCase = 2_0_0
UpperCamelCase = {"""Content-Length""": """100"""}
UpperCamelCase = {}
def __magic_name__ ( self : Dict, **__A : List[str] ):
return [bytes(snake_case_, '''utf-8''' )]
def a__ ( *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ) -> Any:
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
import requests
monkeypatch.setattr(lowercase_ , '''request''' , lowercase_ )
UpperCAmelCase : str = URL
if issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[Any] = url
elif issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : Dict = [url]
elif issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : str = {'''train''': url}
UpperCAmelCase : List[Any] = '''dummy'''
UpperCAmelCase : int = '''downloads'''
UpperCAmelCase : Dict = tmp_path
UpperCAmelCase : Optional[Any] = DownloadConfig(
cache_dir=os.path.join(lowercase_ , lowercase_ ) , use_etag=lowercase_ , )
UpperCAmelCase : Union[str, Any] = DownloadManager(dataset_name=lowercase_ , download_config=lowercase_ )
UpperCAmelCase : List[Any] = dl_manager.download(lowercase_ )
UpperCAmelCase : List[str] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : List[Any] = [downloaded_paths]
UpperCAmelCase : Any = [urls]
elif isinstance(lowercase_ , lowercase_ ):
assert "train" in downloaded_paths.keys()
UpperCAmelCase : Optional[Any] = downloaded_paths.values()
UpperCAmelCase : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase_ , lowercase_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCAmelCase : List[Any] = Path(lowercase_ )
UpperCAmelCase : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCAmelCase : Optional[int] = downloaded_path.read_text()
assert content == CONTENT
UpperCAmelCase : List[str] = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
UpperCAmelCase : List[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
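# The assertions above pin down the DownloadManager cache contract: a download
# lands at <cache_dir>/<cache_subdir>/<hash_of_url>, with a sibling .json file
# recording {"url": ..., "etag": ...} metadata.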
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict ) -> Any:
UpperCAmelCase : int = str(lowercase_ )
if issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : List[str] = filename
elif issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : str = [filename]
elif issubclass(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = {'''train''': filename}
UpperCAmelCase : Optional[Any] = '''dummy'''
UpperCAmelCase : int = xz_file.parent
UpperCAmelCase : int = '''extracted'''
UpperCAmelCase : Optional[int] = DownloadConfig(
cache_dir=lowercase_ , use_etag=lowercase_ , )
UpperCAmelCase : Any = DownloadManager(dataset_name=lowercase_ , download_config=lowercase_ )
UpperCAmelCase : Tuple = dl_manager.extract(lowercase_ )
UpperCAmelCase : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Any = [extracted_paths]
UpperCAmelCase : List[str] = [paths]
elif isinstance(lowercase_ , lowercase_ ):
assert "train" in extracted_paths.keys()
UpperCAmelCase : Any = extracted_paths.values()
UpperCAmelCase : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase_ , lowercase_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCAmelCase : Optional[Any] = Path(lowercase_ )
UpperCAmelCase : List[str] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase_ , etag=lowercase_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCAmelCase : Union[str, Any] = extracted_path.read_text()
UpperCAmelCase : Optional[int] = text_file.read_text()
assert extracted_file_content == expected_file_content
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Any ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(lowercase_ , start=1 ):
UpperCAmelCase : Union[str, Any] = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
UpperCAmelCase : List[str] = request.getfixturevalue(lowercase_ )
UpperCAmelCase : Any = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase_ ) , start=1 ):
_test_jsonl(lowercase_ , lowercase_ )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def a__ ( UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ) -> Optional[Any]:
UpperCAmelCase : int = request.getfixturevalue(lowercase_ )
UpperCAmelCase : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase_ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase_ ) , start=1 ):
_test_jsonl(lowercase_ , lowercase_ )
assert num_tar == 1
assert num_jsonl == 2
def a__ ( UpperCAmelCase : Union[str, Any] ) -> Any:
UpperCAmelCase : int = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase_ ) , start=1 ):
assert os.path.basename(lowercase_ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 336 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
lowercase__ = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(default=A_, metadata={'''help''': '''The input training data file (a text file).'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase_ :
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
def __call__( self : Optional[Any] , snake_case_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(snake_case_ ) for feature in features]
A__ = len(snake_case_ )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
A__ = list(chain(*snake_case_ ) )
A__ = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
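# Shape note for the collator above: each feature arrives with `num_choices`
# stacked candidate sequences, which are flattened to (batch * num_choices,
# seq_len) for padding and then viewed back to (batch, num_choices, -1) so the
# model scores all candidates in one pass.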
def _SCREAMING_SNAKE_CASE ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__, A__, A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__, A__, A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
datasets.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [f"""ending{i}""" for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase_ ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ )
]
# Flatten out
A__ = list(chain(*lowercase_ ) )
A__ = list(chain(*lowercase_ ) )
# Tokenize
A__ = tokenizer(
lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()}
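    # Net effect: one SWAG example fans out into four (context, context+ending)
    # pairs, which are tokenized flat and then regrouped in chunks of 4 so each
    # feature keeps its candidate sequences together.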
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase_ ):
A__, A__ = eval_predictions
A__ = np.argmax(lowercase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("train" , lowercase_ )
trainer.save_metrics("train" , lowercase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("eval" , lowercase_ )
trainer.save_metrics("eval" , lowercase_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 247 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = "▁"
__magic_name__ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
__magic_name__ = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
__magic_name__ = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file , """r""" , encoding="""utf-8""") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size( self):
        return len(self.fairseq_ids_to_tokens)
    def get_vocab( self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self , text):
        return self.sp_model.encode(text , out_type=str)
    def _convert_token_to_id( self , token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens):
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE , """ """).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file , """w""" , encoding="""utf-8""") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
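For reference, a hedged standalone sketch of the reduced-vocabulary construction performed above: special tokens first, then tokens in file order, skipping duplicates, with lookups falling back to the <unk> id. The helper name and sample tokens are illustrative.

def build_reduced_vocab(special_tokens, file_tokens):
    token_to_id = {}
    for token in special_tokens + file_tokens:
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    return token_to_id

vocab = build_reduced_vocab(["<s>", "<pad>", "</s>", "<unk>"], ["▁xin", "▁chào"])
unk_id = vocab["<unk>"]
print(vocab.get("▁xin", unk_id), vocab.get("missing", unk_id))  # 4 3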
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
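A hedged sketch of the lazy-import idea used above; the real transformers _LazyModule has more machinery, and the class below is only illustrative. Attribute access triggers the submodule import, so the top-level package import stays cheap.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is accessed.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")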
| 255 | 0 |
'''simple docstring'''
import math
def main() -> None:
    message = input("""Enter message: """ )
    key = int(input(F"Enter key [2-{len(message ) - 1}]: " ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"Output:\n{text + '|'}" )
def encrypt_message( key: int , message: str ) -> str:
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message( key: int , message: str ) -> str:
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
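A quick round-trip check for the transposition cipher above, using the encrypt_message/decrypt_message names from the fix-up; any key between 2 and len(message) - 1 should invert cleanly.

plaintext = "Common sense is not so common."
assert decrypt_message(6, encrypt_message(6, plaintext)) == plaintext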
| 276 |
'''simple docstring'''
class TrieNode:
    def __init__(self ) -> None:
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self , words: list[str] ) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert(self , word: str ) -> None:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self , word: str ) -> bool:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self , word: str ) -> None:
        '''simple docstring'''
        def _delete(curr: TrieNode , word: str , index: int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words( node: TrieNode , word: str ) -> None:
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def print_results( msg: str , passes: bool ) -> None:
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
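The fixed-up trie only answers exact membership; a hedged extension sketch (not part of the original) adds prefix queries by reusing the same node walk without the leaf check.

def starts_with(root: TrieNode, prefix: str) -> bool:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return False
        curr = curr.nodes[char]
    return True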
| 276 | 1 |
from math import isqrt
def calculate_prime_numbers( max_number ):
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number = 1_0**8 ):
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f"{solution() = }")
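A small sanity check for solution(): a semiprime here is a product of exactly two primes, not necessarily distinct. Below 30 the semiprimes are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so the count is ten.

assert solution(30) == 10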
| 305 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 305 | 1 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
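The start/end-prompt machinery above reduces to a list splice between two marker lines; a minimal standalone sketch with illustrative marker strings:

def splice_between(lines, start_index, end_index, new_text):
    # Keep everything before the block, swap in the new text, keep the rest.
    return lines[:start_index] + [new_text] + lines[end_index:]

doc = ["<!--start-->\n", "old\n", "<!--end-->\n"]
print("".join(splice_between(doc, 1, 2, "new\n")))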
| 298 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """simple docstring"""
    def __init__( self , num_of_nodes ):
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node , v_node , weight ):
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ):
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ):
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ):
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
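A usage sketch for the fixed-up Graph class above; on this classic four-node example Borůvka's algorithm should keep the edges of weight 4, 5 and 10, for a total MST weight of 19.

g = Graph(4)
g.add_edge(0, 1, 10)
g.add_edge(0, 2, 6)
g.add_edge(0, 3, 5)
g.add_edge(1, 3, 15)
g.add_edge(2, 3, 4)
g.boruvka()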
| 133 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i ): # picklable for multiprocessing
    '''simple docstring'''
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    '''simple docstring'''
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_map_nested_with_num_proc( num_proc ):
    '''simple docstring'''
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
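Conceptually, map_nested applies a function to every leaf of a nested list/dict structure, which is what the expected values above encode; a hedged single-process sketch that ignores num_proc and the other map_nested options:

def map_leaves(fn, data):
    if isinstance(data, dict):
        return {key: map_leaves(fn, value) for key, value in data.items()}
    if isinstance(data, list):
        return [map_leaves(fn, value) for value in data]
    return fn(data)

assert map_leaves(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}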
| 133 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]:
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running"
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : Tuple = BasicTokenizer()
UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
UpperCAmelCase_ : List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
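The WordpieceTokenizer assertions above follow from greedy longest-match-first subword splitting; a hedged standalone sketch of that algorithm (vocab contents and helper name are illustrative):

def wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate  # continuation pieces get the ## prefix
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # no prefix matched, the whole word becomes [UNK]
        tokens.append(piece)
        start = end
    return tokens

assert wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]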
| 268 |
"""simple docstring"""
from __future__ import annotations
class Node :
    def __init__( self , data: int ) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree ): # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree ):
    return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree ):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main(): # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
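A quick check with the fixed-up helpers above: a root with exactly two leaf children is a full binary tree of depth 2.

t = Node(1)
t.left, t.right = Node(2), Node(3)
assert is_full_binary_tree(t) and depth_of_tree(t) == 2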
| 268 | 1 |
'''simple docstring'''
from typing import Any
class Node :
    """simple docstring"""
    def __init__( self , data: Any ) -> None:
        """simple docstring"""
        self.data = data
        self.next = None
class LinkedList :
    """simple docstring"""
    def __init__( self ) -> None:
        """simple docstring"""
        self.head = None
    def print_list( self ) -> None:
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push( self , new_data: Any ) -> None:
        """simple docstring"""
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_1: Any , node_data_2: Any ) -> None:
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
__a = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 352 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module ):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image( image ):
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 17 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Any:
_lowercase =True
_lowercase =LlamaModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
_lowercase =LlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]:
_lowercase =True
_lowercase =True
_lowercase =LlamaForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
_lowercase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    def setUp(self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=3_7 )
def __A (self ) -> Dict:
self.config_tester.run_common_tests()
def __A (self ) -> Union[str, Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A (self ) -> List[Any]:
_lowercase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase =type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A (self ) -> Union[str, Any]:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A (self ) -> str:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase ='''single_label_classification'''
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A (self ) -> Dict:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =3
_lowercase ='''multi_label_classification'''
_lowercase =input_dict['''input_ids''']
_lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
_lowercase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowercase =LlamaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __A (self ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __A (self , UpperCAmelCase ) -> Optional[Any]:
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =ids_tensor([1, 1_0] , config.vocab_size )
_lowercase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowercase =LlamaModel(UpperCAmelCase )
original_model.to(UpperCAmelCase )
original_model.eval()
_lowercase =original_model(UpperCAmelCase ).last_hidden_state
_lowercase =original_model(UpperCAmelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowercase ={'''type''': scaling_type, '''factor''': 10.0}
_lowercase =LlamaModel(UpperCAmelCase )
scaled_model.to(UpperCAmelCase )
scaled_model.eval()
_lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
_lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest( unittest.TestCase):
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __A (self ) -> Tuple:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __A (self ) -> List[str]:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __A (self ) -> int:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
# Expected mean on dim = -1
_lowercase =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __A (self ) -> Dict:
_lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
_lowercase =model(torch.tensor(UpperCAmelCase ) )
_lowercase =torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
_lowercase =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def __A (self ) -> int:
_lowercase ='''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
_lowercase ='''Simply put, the theory of relativity states that '''
_lowercase =LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
_lowercase =tokenizer.encode(UpperCAmelCase , return_tensors='''pt''' )
_lowercase =LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase )
# greedy generation outputs
_lowercase =model.generate(UpperCAmelCase , max_new_tokens=6_4 , top_p=UpperCAmelCase , temperature=1 , do_sample=UpperCAmelCase )
_lowercase =tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
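# Sketch of the dynamic NTK RoPE rescaling rule the scaling test above relies on:
# the rotary base is only rescaled once the input grows past the original maximum
# sequence length, so short inputs stay bit-identical to the unscaled model.
# Illustrative helper only -- the names and factoring are assumptions, not the
# actual attributes used inside transformers.
def dynamically_scaled_rope_base(base: float, seq_len: int, max_positions: int, factor: float, dim: int) -> float:
    if seq_len <= max_positions:
        return base  # unchanged base: explains why the "dynamic" branch expects allclose()
    scale = (factor * seq_len / max_positions) - (factor - 1)
    return base * scale ** (dim / (dim - 2))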
| 5 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, '_optional_components' ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component ) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff, 1e-4 )
    def test_mask(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
        self.assertEqual(mask[0, -3, -4], 0 )
    def test_inversion(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def test_inversion_dpm(self):
        device = 'cpu'
        components = self.get_dummy_components()
        scheduler_args = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        components['scheduler'] = DPMSolverMultistepScheduler(**scheduler_args )
        components['inverse_scheduler'] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
        raw_image = raw_image.convert('RGB' ).resize((768, 768) )
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 255 | 0 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
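# Hypothetical companion check (not in the original file): in a full binary tree
# every internal node has exactly two children, so #leaves == #internal_nodes + 1.
def count_nodes(tree: Node | None) -> tuple[int, int]:
    """Return (internal_node_count, leaf_count) for the subtree rooted at `tree`."""
    if tree is None:
        return 0, 0
    if tree.left is None and tree.right is None:
        return 0, 1
    left_internal, left_leaves = count_nodes(tree.left)
    right_internal, right_leaves = count_nodes(tree.right)
    return 1 + left_internal + right_internal, left_leaves + right_leaves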
if __name__ == "__main__":
main()
| 236 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_clip_fast'] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_clip'] = ['CLIPFeatureExtractor']
    _import_structure['image_processing_clip'] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clip'] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_clip'] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_clip'] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
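# Rough sketch of the lazy-module behaviour used above (assumed semantics, not the
# actual transformers implementation): attribute access triggers the real import,
# so importing the package stays cheap until a symbol is actually used.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Minimal stand-in: resolve exported names to their submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported class name back to the submodule that defines it
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name: str):
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so the import only happens once
        return value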
| 236 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
| 305 |
def excel_title_to_column(column_title: str) -> int:
    """Given an Excel-style column title (e.g. "AB"), return its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
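# Companion sketch (not in the original file): the inverse mapping, column number
# back to an Excel-style title, using repeated divmod in base 26 with an off-by-one
# shift because there is no zero digit ("A" == 1).
def excel_column_to_title(column_number: int) -> str:
    assert column_number > 0
    title = ""
    while column_number > 0:
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title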
if __name__ == "__main__":
from doctest import testmod
testmod()
| 305 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
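# Stripped-down sketch of what `find_executable_batch_size` does (assumed behaviour;
# the real accelerate implementation also inspects the exception to confirm it is a
# CUDA out-of-memory error before shrinking):
def toy_find_executable_batch_size(function, starting_batch_size=128):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError:  # treated here as "out of memory"
                batch_size //= 2  # halve and retry, e.g. 128 -> 64 -> 32 ...
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper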
| 121 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count square laminae that can be formed using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
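# Hedged cross-check (not in the original solution): enumerate (outer, hole) pairs
# with matching parity directly. Useful for validating `solution` on small limits
# before trusting the closed-form counting above.
def brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as the outer width
            if outer**2 - hole**2 <= limit:
                count += 1
    return count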
if __name__ == "__main__":
print(f'''{solution() = }''')
| 121 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 133 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, found by value
    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
| 133 | 1 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal gradient at the reflection point on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
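# Sanity-check sketch (assumption: every point returned by next_point must still lie
# on the ellipse 4x^2 + y^2 = 100, since the quadratic above intersects the outgoing
# ray with exactly that curve).
def on_ellipse(x: float, y: float, tol: float = 1e-6) -> bool:
    return abs(4 * x * x + y * y - 100) < tol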
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 351 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
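# Hedged alternative using the newer Aer API (`qiskit.execute` is deprecated in
# recent releases). Assumes the separate qiskit-aer package is installed; treat it
# as a sketch rather than the canonical form used here.
def single_qubit_measure_v2(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    from qiskit_aer import AerSimulator  # local import: optional dependency

    simulator = AerSimulator()
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.measure([0], [0])
    job = simulator.run(qiskit.transpile(circuit, simulator), shots=1_000)
    return job.result().get_counts(circuit)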
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
| 262 | 0 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
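# Simpler (if slower) reference sketch: slide a fixed 13-digit window over the
# whole string and take the maximum product directly. Handy for validating the
# skipping logic in `solution`.
def brute_force_solution(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))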
if __name__ == "__main__":
print(F"""{solution() = }""")
| 69 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a = _symbol_database.Default()
_a = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 17 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2 | 307 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 307 | 1 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b - digitsum(b), where the term is written as b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
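# Naive reference sketch for validating the memoised jumping logic on small n
# (the real target, n = 10**15, is far out of reach for this loop):
# a(1) = 1 and a(i+1) = a(i) + digitsum(a(i)).
def naive_solution(n: int = 1_000) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a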
if __name__ == "__main__":
print(f'''{solution() = }''')
| 236 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236 | 1 |
'''simple docstring'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(F'https://google.com{link.get("href")}')
| 16 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences (Kadane-style scan)."""
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
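# Variant sketch (not in the original file): the same Kadane-style scan, but also
# recovering which slice achieves the maximum sum.
def max_subsequence_slice(nums: Sequence[int]) -> tuple[int, int, int]:
    """Return (best_sum, start, end) with `end` exclusive."""
    best_sum, best_start, best_end = nums[0], 0, 1
    cur_sum, cur_start = nums[0], 0
    for i in range(1, len(nums)):
        if cur_sum < 0:
            cur_sum, cur_start = nums[i], i  # restart: a negative prefix never helps
        else:
            cur_sum += nums[i]
        if cur_sum > best_sum:
            best_sum, best_start, best_end = cur_sum, cur_start, i + 1
    return best_sum, best_start, best_end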
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 16 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
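# Note: post_process_masks upsamples low-resolution mask logits back to the
# original image size, which is why (1, 3, 5, 5) inputs come back with shape
# (1, 3, 1764, 2646) in the assertions above.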
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
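# Malformed masks surface as tf.errors.InvalidArgumentError on the TensorFlow
# path, whereas the PyTorch path above raises a plain ValueError.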
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 121 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin probabilistic primality test with ``prec`` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1
    # n - 1 = d * (2 ** exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
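# Illustrative behaviour (probabilistic, but a wrong answer after 1000 rounds
# is vanishingly unlikely):
#   is_prime_big(97)   # -> True
#   is_prime_big(100)  # -> False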
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 121 | 1 |
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
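# English gloss of the Korean snippet above: "How to install Transformers" /
# "To install from source instead of the last release, comment out the command
# above and uncomment the one below."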
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 369 |
"""Euclid's algorithm for the greatest common divisor, iterative and recursive."""


def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
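# Both variants rely on the identity gcd(a, b) == gcd(b, a % b); the remainder
# strictly shrinks each step, so termination is guaranteed once b reaches 0.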
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
| 61 | 0 |